From e9ef08bdc189e98610bc4b9a6e6f19bc3793b2c8 Mon Sep 17 00:00:00 2001
From: Fenghua Yu
Date: Fri, 8 Dec 2006 16:06:01 -0800
Subject: [IA64] Itanium MC Error Injection Tool: Kernel configuration

This patch has kernel configuration changes for the MC Error Injection Tool.

Signed-off-by: Fenghua Yu
Signed-off-by: Tony Luck

diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index fcacfe2..f1e7cc1 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -429,6 +429,16 @@ config IA64_PALINFO
	  To use this option, you have to ensure that the "/proc file system
	  support" (CONFIG_PROC_FS) is enabled, too.

+config IA64_MC_ERR_INJECT
+	tristate "MC error injection support"
+	help
+	  Selects whether to build support for MC error injection. When
+	  enabled, the kernel provides a sysfs interface that user
+	  applications can use to call the MC error injection PAL procedure
+	  and inject various errors. This is a useful tool for MCA testing.
+
+	  If you're unsure, do not select this option.
+
 config SGI_SN
	def_bool y if (IA64_SGI_SN2 || IA64_GENERIC)

diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 9001b3f..327e1f5 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -144,6 +144,7 @@ CONFIG_COMPAT=y
 CONFIG_IA64_MCA_RECOVERY=y
 CONFIG_PERFMON=y
 CONFIG_IA64_PALINFO=y
+# CONFIG_IA64_MC_ERR_INJECT is not set
 CONFIG_SGI_SN=y
 #
--
cgit v0.10.2

From bf6285278418f1dc6f07296bbb286da0bfe26d5d Mon Sep 17 00:00:00 2001
From: Fenghua Yu
Date: Fri, 8 Dec 2006 16:14:22 -0800
Subject: [IA64] Itanium MC Error Injection Tool: Doc and sample application

This patch contains documentation and a sample application. Since the
sample application has ~1000 lines of code, it might not be suitable for
kernel documentation in the kernel tree. If you think this is not a good
place to hold the sample application, please let me know and I'm open to
other choices, e.g. sourceforge etc.

Signed-off-by: Fenghua Yu
Signed-off-by: Tony Luck

diff --git a/Documentation/ia64/err_inject.txt b/Documentation/ia64/err_inject.txt
new file mode 100644
index 0000000..26487c1
--- /dev/null
+++ b/Documentation/ia64/err_inject.txt
@@ -0,0 +1,1068 @@
+
+IPF Machine Check (MC) error inject tool
+========================================
+
+The IPF Machine Check (MC) error inject tool is used to inject MC
+errors from Linux. The tool is a test bed for the IPF MC work flow,
+including hardware correctable error handling, OS recoverable error
+handling, MC event logging, etc.
+
+The tool includes two parts: a kernel driver and a sample user
+application. The driver provides an interface to PAL to inject errors
+and to query error injection capabilities. The driver code is in
+arch/ia64/kernel/err_inject.c. The sample application (shown below)
+builds combinations of various errors and calls the driver's sysfs
+interface to inject errors or query error injection capabilities.
+
+The tool can be used to test the MC handling capabilities of Intel IPF
+machines. It is especially useful for people who cannot access a
+hardware MC injection tool. It is also very useful for integration with
+other software test suites to do stress testing on IPF.
+
+Below is a sample application that is part of the whole tool. The sample
+can be used as a working test tool, or it can be expanded to include
+more features. It can also be integrated into a library or other user
+applications for more thorough testing.
+
+The sample application takes err.conf as its error configuration input
+and is compiled with gcc. After you install the err_inject driver, you
+can run this sample application to inject errors.
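Before the errata note and the full sample below, the following minimal program (an illustration for this document, not part of the patch) shows the basic sysfs handshake that the sample automates. The wr_attr()/rd_attr() helpers are invented here; the attribute names match the driver, and the path uses the /sys/devices/system/cpu/cpuN/err_inject/ form that a later patch in this series switches the sample to (the sample as posted still uses the node0 path). Run it as root with the err_inject driver loaded.

#include <stdio.h>
#include <stdlib.h>

#define ERR_INJECT_DIR	"/sys/devices/system/cpu/cpu0/err_inject/"

static int wr_attr(const char *name, const char *val)
{
	char fn[256];
	FILE *f;

	snprintf(fn, sizeof(fn), ERR_INJECT_DIR "%s", name);
	f = fopen(fn, "w");
	if (!f)
		return -1;
	fprintf(f, "%s", val);
	return fclose(f);
}

static int rd_attr(const char *name, unsigned long long *val)
{
	char fn[256], buf[256];
	FILE *f;

	snprintf(fn, sizeof(fn), ERR_INJECT_DIR "%s", name);
	f = fopen(fn, "r");
	if (!f)
		return -1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return -1;
	}
	*val = strtoull(buf, NULL, 16);
	return fclose(f);
}

int main(void)
{
	unsigned long long status = -1, capabilities = 0;

	/* 0x4100: err_struct=1 (cache), struct_hier=2, mode=0 -- the same
	 * cache/hier-2 encoding used by the err.conf entries below, but as
	 * a capabilities query rather than an injection. */
	wr_attr("err_type_info", "4100");
	wr_attr("err_struct_info", "0");
	wr_attr("err_data_buffer", "0, 0, 0");
	wr_attr("call_start", "1");	/* fire the PAL call in physical mode */

	rd_attr("status", &status);
	rd_attr("capabilities", &capabilities);
	printf("status=%llx capabilities=%llx\n", status, capabilities);
	return 0;
}

A status of 0 means PAL accepted the request; capabilities then holds the returned capability bit mask, which is exactly how query_all_capabilities() in the sample interprets it.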
+ +Errata: Itanium 2 Processors Specification Update lists some errata against +the pal_mc_error_inject PAL procedure. The following err.conf has been tested +on latest Montecito PAL. + +err.conf: + +#This is configuration file for err_inject_tool. +#The format of the each line is: +#cpu, loop, interval, err_type_info, err_struct_info, err_data_buffer +#where +# cpu: logical cpu number the error will be inject in. +# loop: times the error will be injected. +# interval: In second. every so often one error is injected. +# err_type_info, err_struct_info: PAL parameters. +# +#Note: All values are hex w/o or w/ 0x prefix. + + +#On cpu2, inject only total 0x10 errors, interval 5 seconds +#corrected, data cache, hier-2, physical addr(assigned by tool code). +#working on Montecito latest PAL. +2, 10, 5, 4101, 95 + +#On cpu4, inject and consume total 0x10 errors, interval 5 seconds +#corrected, data cache, hier-2, physical addr(assigned by tool code). +#working on Montecito latest PAL. +4, 10, 5, 4109, 95 + +#On cpu15, inject and consume total 0x10 errors, interval 5 seconds +#recoverable, DTR0, hier-2. +#working on Montecito latest PAL. +0xf, 0x10, 5, 4249, 15 + +The sample application source code: + +err_injection_tool.c: + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Copyright (C) 2006 Intel Co + * Fenghua Yu + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_FN_SIZE 256 +#define MAX_BUF_SIZE 256 +#define DATA_BUF_SIZE 256 +#define NR_CPUS 512 +#define MAX_TASK_NUM 2048 +#define MIN_INTERVAL 5 // seconds +#define ERR_DATA_BUFFER_SIZE 3 // Three 8-byte. +#define PARA_FIELD_NUM 5 +#define MASK_SIZE (NR_CPUS/64) +#define PATH_FORMAT "/sys/devices/system/node/node0/cpu%d/err_inject/" + +int sched_setaffinity(pid_t pid, unsigned int len, unsigned long *mask); + +int verbose; +#define vbprintf if (verbose) printf + +int log_info(int cpu, const char *fmt, ...) 
+{ + FILE *log; + char fn[MAX_FN_SIZE]; + char buf[MAX_BUF_SIZE]; + va_list args; + + sprintf(fn, "%d.log", cpu); + log=fopen(fn, "a+"); + if (log==NULL) { + perror("Error open:"); + return -1; + } + + va_start(args, fmt); + vprintf(fmt, args); + memset(buf, 0, MAX_BUF_SIZE); + vsprintf(buf, fmt, args); + va_end(args); + + fwrite(buf, sizeof(buf), 1, log); + fclose(log); + + return 0; +} + +typedef unsigned long u64; +typedef unsigned int u32; + +typedef union err_type_info_u { + struct { + u64 mode : 3, /* 0-2 */ + err_inj : 3, /* 3-5 */ + err_sev : 2, /* 6-7 */ + err_struct : 5, /* 8-12 */ + struct_hier : 3, /* 13-15 */ + reserved : 48; /* 16-63 */ + } err_type_info_u; + u64 err_type_info; +} err_type_info_t; + +typedef union err_struct_info_u { + struct { + u64 siv : 1, /* 0 */ + c_t : 2, /* 1-2 */ + cl_p : 3, /* 3-5 */ + cl_id : 3, /* 6-8 */ + cl_dp : 1, /* 9 */ + reserved1 : 22, /* 10-31 */ + tiv : 1, /* 32 */ + trigger : 4, /* 33-36 */ + trigger_pl : 3, /* 37-39 */ + reserved2 : 24; /* 40-63 */ + } err_struct_info_cache; + struct { + u64 siv : 1, /* 0 */ + tt : 2, /* 1-2 */ + tc_tr : 2, /* 3-4 */ + tr_slot : 8, /* 5-12 */ + reserved1 : 19, /* 13-31 */ + tiv : 1, /* 32 */ + trigger : 4, /* 33-36 */ + trigger_pl : 3, /* 37-39 */ + reserved2 : 24; /* 40-63 */ + } err_struct_info_tlb; + struct { + u64 siv : 1, /* 0 */ + regfile_id : 4, /* 1-4 */ + reg_num : 7, /* 5-11 */ + reserved1 : 20, /* 12-31 */ + tiv : 1, /* 32 */ + trigger : 4, /* 33-36 */ + trigger_pl : 3, /* 37-39 */ + reserved2 : 24; /* 40-63 */ + } err_struct_info_register; + struct { + u64 reserved; + } err_struct_info_bus_processor_interconnect; + u64 err_struct_info; +} err_struct_info_t; + +typedef union err_data_buffer_u { + struct { + u64 trigger_addr; /* 0-63 */ + u64 inj_addr; /* 64-127 */ + u64 way : 5, /* 128-132 */ + index : 20, /* 133-152 */ + : 39; /* 153-191 */ + } err_data_buffer_cache; + struct { + u64 trigger_addr; /* 0-63 */ + u64 inj_addr; /* 64-127 */ + u64 way : 5, /* 128-132 */ + index : 20, /* 133-152 */ + reserved : 39; /* 153-191 */ + } err_data_buffer_tlb; + struct { + u64 trigger_addr; /* 0-63 */ + } err_data_buffer_register; + struct { + u64 reserved; /* 0-63 */ + } err_data_buffer_bus_processor_interconnect; + u64 err_data_buffer[ERR_DATA_BUFFER_SIZE]; +} err_data_buffer_t; + +typedef union capabilities_u { + struct { + u64 i : 1, + d : 1, + rv : 1, + tag : 1, + data : 1, + mesi : 1, + dp : 1, + reserved1 : 3, + pa : 1, + va : 1, + wi : 1, + reserved2 : 20, + trigger : 1, + trigger_pl : 1, + reserved3 : 30; + } capabilities_cache; + struct { + u64 d : 1, + i : 1, + rv : 1, + tc : 1, + tr : 1, + reserved1 : 27, + trigger : 1, + trigger_pl : 1, + reserved2 : 30; + } capabilities_tlb; + struct { + u64 gr_b0 : 1, + gr_b1 : 1, + fr : 1, + br : 1, + pr : 1, + ar : 1, + cr : 1, + rr : 1, + pkr : 1, + dbr : 1, + ibr : 1, + pmc : 1, + pmd : 1, + reserved1 : 3, + regnum : 1, + reserved2 : 15, + trigger : 1, + trigger_pl : 1, + reserved3 : 30; + } capabilities_register; + struct { + u64 reserved; + } capabilities_bus_processor_interconnect; +} capabilities_t; + +typedef struct resources_s { + u64 ibr0 : 1, + ibr2 : 1, + ibr4 : 1, + ibr6 : 1, + dbr0 : 1, + dbr2 : 1, + dbr4 : 1, + dbr6 : 1, + reserved : 48; +} resources_t; + + +long get_page_size(void) +{ + long page_size=sysconf(_SC_PAGESIZE); + return page_size; +} + +#define PAGE_SIZE (get_page_size()==-1?0x4000:get_page_size()) +#define SHM_SIZE (2*PAGE_SIZE*NR_CPUS) +#define SHM_VA 0x2000000100000000 + +int shmid; +void *shmaddr; + +int create_shm(void) +{ 
+ key_t key; + char fn[MAX_FN_SIZE]; + + /* cpu0 is always existing */ + sprintf(fn, PATH_FORMAT, 0); + if ((key = ftok(fn, 's')) == -1) { + perror("ftok"); + return -1; + } + + shmid = shmget(key, SHM_SIZE, 0644 | IPC_CREAT); + if (shmid == -1) { + if (errno==EEXIST) { + shmid = shmget(key, SHM_SIZE, 0); + if (shmid == -1) { + perror("shmget"); + return -1; + } + } + else { + perror("shmget"); + return -1; + } + } + vbprintf("shmid=%d", shmid); + + /* connect to the segment: */ + shmaddr = shmat(shmid, (void *)SHM_VA, 0); + if (shmaddr == (void*)-1) { + perror("shmat"); + return -1; + } + + memset(shmaddr, 0, SHM_SIZE); + mlock(shmaddr, SHM_SIZE); + + return 0; +} + +int free_shm() +{ + munlock(shmaddr, SHM_SIZE); + shmdt(shmaddr); + semctl(shmid, 0, IPC_RMID); + + return 0; +} + +#ifdef _SEM_SEMUN_UNDEFINED +union semun +{ + int val; + struct semid_ds *buf; + unsigned short int *array; + struct seminfo *__buf; +}; +#endif + +u32 mode=1; /* 1: physical mode; 2: virtual mode. */ +int one_lock=1; +key_t key[NR_CPUS]; +int semid[NR_CPUS]; + +int create_sem(int cpu) +{ + union semun arg; + char fn[MAX_FN_SIZE]; + int sid; + + sprintf(fn, PATH_FORMAT, cpu); + sprintf(fn, "%s/%s", fn, "err_type_info"); + if ((key[cpu] = ftok(fn, 'e')) == -1) { + perror("ftok"); + return -1; + } + + if (semid[cpu]!=0) + return 0; + + /* clear old semaphore */ + if ((sid = semget(key[cpu], 1, 0)) != -1) + semctl(sid, 0, IPC_RMID); + + /* get one semaphore */ + if ((semid[cpu] = semget(key[cpu], 1, IPC_CREAT | IPC_EXCL)) == -1) { + perror("semget"); + printf("Please remove semaphore with key=0x%lx, then run the tool.\n", + (u64)key[cpu]); + return -1; + } + + vbprintf("semid[%d]=0x%lx, key[%d]=%lx\n",cpu,(u64)semid[cpu],cpu, + (u64)key[cpu]); + /* initialize the semaphore to 1: */ + arg.val = 1; + if (semctl(semid[cpu], 0, SETVAL, arg) == -1) { + perror("semctl"); + return -1; + } + + return 0; +} + +static int lock(int cpu) +{ + struct sembuf lock; + + lock.sem_num = cpu; + lock.sem_op = 1; + semop(semid[cpu], &lock, 1); + + return 0; +} + +static int unlock(int cpu) +{ + struct sembuf unlock; + + unlock.sem_num = cpu; + unlock.sem_op = -1; + semop(semid[cpu], &unlock, 1); + + return 0; +} + +void free_sem(int cpu) +{ + semctl(semid[cpu], 0, IPC_RMID); +} + +int wr_multi(char *fn, unsigned long *data, int size) +{ + int fd; + char buf[MAX_BUF_SIZE]; + int ret; + + if (size==1) + sprintf(buf, "%lx", *data); + else if (size==3) + sprintf(buf, "%lx,%lx,%lx", data[0], data[1], data[2]); + else { + fprintf(stderr,"write to file with wrong size!\n"); + return -1; + } + + fd=open(fn, O_RDWR); + if (!fd) { + perror("Error:"); + return -1; + } + ret=write(fd, buf, sizeof(buf)); + close(fd); + return ret; +} + +int wr(char *fn, unsigned long data) +{ + return wr_multi(fn, &data, 1); +} + +int rd(char *fn, unsigned long *data) +{ + int fd; + char buf[MAX_BUF_SIZE]; + + fd=open(fn, O_RDONLY); + if (fd<0) { + perror("Error:"); + return -1; + } + read(fd, buf, MAX_BUF_SIZE); + *data=strtoul(buf, NULL, 16); + close(fd); + return 0; +} + +int rd_status(char *path, int *status) +{ + char fn[MAX_FN_SIZE]; + sprintf(fn, "%s/status", path); + if (rd(fn, (u64*)status)<0) { + perror("status reading error.\n"); + return -1; + } + + return 0; +} + +int rd_capabilities(char *path, u64 *capabilities) +{ + char fn[MAX_FN_SIZE]; + sprintf(fn, "%s/capabilities", path); + if (rd(fn, capabilities)<0) { + perror("capabilities reading error.\n"); + return -1; + } + + return 0; +} + +int rd_all(char *path) +{ + unsigned long err_type_info, 
err_struct_info, err_data_buffer; + int status; + unsigned long capabilities, resources; + char fn[MAX_FN_SIZE]; + + sprintf(fn, "%s/err_type_info", path); + if (rd(fn, &err_type_info)<0) { + perror("err_type_info reading error.\n"); + return -1; + } + printf("err_type_info=%lx\n", err_type_info); + + sprintf(fn, "%s/err_struct_info", path); + if (rd(fn, &err_struct_info)<0) { + perror("err_struct_info reading error.\n"); + return -1; + } + printf("err_struct_info=%lx\n", err_struct_info); + + sprintf(fn, "%s/err_data_buffer", path); + if (rd(fn, &err_data_buffer)<0) { + perror("err_data_buffer reading error.\n"); + return -1; + } + printf("err_data_buffer=%lx\n", err_data_buffer); + + sprintf(fn, "%s/status", path); + if (rd("status", (u64*)&status)<0) { + perror("status reading error.\n"); + return -1; + } + printf("status=%d\n", status); + + sprintf(fn, "%s/capabilities", path); + if (rd(fn,&capabilities)<0) { + perror("capabilities reading error.\n"); + return -1; + } + printf("capabilities=%lx\n", capabilities); + + sprintf(fn, "%s/resources", path); + if (rd(fn, &resources)<0) { + perror("resources reading error.\n"); + return -1; + } + printf("resources=%lx\n", resources); + + return 0; +} + +int query_capabilities(char *path, err_type_info_t err_type_info, + u64 *capabilities) +{ + char fn[MAX_FN_SIZE]; + err_struct_info_t err_struct_info; + err_data_buffer_t err_data_buffer; + + err_struct_info.err_struct_info=0; + memset(err_data_buffer.err_data_buffer, -1, ERR_DATA_BUFFER_SIZE*8); + + sprintf(fn, "%s/err_type_info", path); + wr(fn, err_type_info.err_type_info); + sprintf(fn, "%s/err_struct_info", path); + wr(fn, 0x0); + sprintf(fn, "%s/err_data_buffer", path); + wr_multi(fn, err_data_buffer.err_data_buffer, ERR_DATA_BUFFER_SIZE); + + // Fire pal_mc_error_inject procedure. 
+ sprintf(fn, "%s/call_start", path); + wr(fn, mode); + + if (rd_capabilities(path, capabilities)<0) + return -1; + + return 0; +} + +int query_all_capabilities() +{ + int status; + err_type_info_t err_type_info; + int err_sev, err_struct, struct_hier; + int cap=0; + u64 capabilities; + char path[MAX_FN_SIZE]; + + err_type_info.err_type_info=0; // Initial + err_type_info.err_type_info_u.mode=0; // Query mode; + err_type_info.err_type_info_u.err_inj=0; + + printf("All capabilities implemented in pal_mc_error_inject:\n"); + sprintf(path, PATH_FORMAT ,0); + for (err_sev=0;err_sev<3;err_sev++) + for (err_struct=0;err_struct<5;err_struct++) + for (struct_hier=0;struct_hier<5;struct_hier++) + { + status=-1; + capabilities=0; + err_type_info.err_type_info_u.err_sev=err_sev; + err_type_info.err_type_info_u.err_struct=err_struct; + err_type_info.err_type_info_u.struct_hier=struct_hier; + + if (query_capabilities(path, err_type_info, &capabilities)<0) + continue; + + if (rd_status(path, &status)<0) + continue; + + if (status==0) { + cap=1; + printf("For err_sev=%d, err_struct=%d, struct_hier=%d: ", + err_sev, err_struct, struct_hier); + printf("capabilities 0x%lx\n", capabilities); + } + } + if (!cap) { + printf("No capabilities supported.\n"); + return 0; + } + + return 0; +} + +int err_inject(int cpu, char *path, err_type_info_t err_type_info, + err_struct_info_t err_struct_info, + err_data_buffer_t err_data_buffer) +{ + int status; + char fn[MAX_FN_SIZE]; + + log_info(cpu, "err_type_info=%lx, err_struct_info=%lx, ", + err_type_info.err_type_info, + err_struct_info.err_struct_info); + log_info(cpu,"err_data_buffer=[%lx,%lx,%lx]\n", + err_data_buffer.err_data_buffer[0], + err_data_buffer.err_data_buffer[1], + err_data_buffer.err_data_buffer[2]); + sprintf(fn, "%s/err_type_info", path); + wr(fn, err_type_info.err_type_info); + sprintf(fn, "%s/err_struct_info", path); + wr(fn, err_struct_info.err_struct_info); + sprintf(fn, "%s/err_data_buffer", path); + wr_multi(fn, err_data_buffer.err_data_buffer, ERR_DATA_BUFFER_SIZE); + + // Fire pal_mc_error_inject procedure. 
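+	// Writing 'mode' to call_start is what actually fires
+	// pal_mc_error_inject in the driver: 1 requests the physical-mode
+	// PAL call, 2 the virtual-mode call (see store_call_start).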
+ sprintf(fn, "%s/call_start", path); + wr(fn,mode); + + if (rd_status(path, &status)<0) { + vbprintf("fail: read status\n"); + return -100; + } + + if (status!=0) { + log_info(cpu, "fail: status=%d\n", status); + return status; + } + + return status; +} + +static int construct_data_buf(char *path, err_type_info_t err_type_info, + err_struct_info_t err_struct_info, + err_data_buffer_t *err_data_buffer, + void *va1) +{ + char fn[MAX_FN_SIZE]; + u64 virt_addr=0, phys_addr=0; + + vbprintf("va1=%lx\n", (u64)va1); + memset(&err_data_buffer->err_data_buffer_cache, 0, ERR_DATA_BUFFER_SIZE*8); + + switch (err_type_info.err_type_info_u.err_struct) { + case 1: // Cache + switch (err_struct_info.err_struct_info_cache.cl_id) { + case 1: //Virtual addr + err_data_buffer->err_data_buffer_cache.inj_addr=(u64)va1; + break; + case 2: //Phys addr + sprintf(fn, "%s/virtual_to_phys", path); + virt_addr=(u64)va1; + if (wr(fn,virt_addr)<0) + return -1; + rd(fn, &phys_addr); + err_data_buffer->err_data_buffer_cache.inj_addr=phys_addr; + break; + default: + printf("Not supported cl_id\n"); + break; + } + break; + case 2: // TLB + break; + case 3: // Register file + break; + case 4: // Bus/system interconnect + default: + printf("Not supported err_struct\n"); + break; + } + + return 0; +} + +typedef struct { + u64 cpu; + u64 loop; + u64 interval; + u64 err_type_info; + u64 err_struct_info; + u64 err_data_buffer[ERR_DATA_BUFFER_SIZE]; +} parameters_t; + +parameters_t line_para; +int para; + +static int empty_data_buffer(u64 *err_data_buffer) +{ + int empty=1; + int i; + + for (i=0;iMIN_INTERVAL + ?interval:MIN_INTERVAL; + parameters[num].err_type_info=err_type_info_conf; + parameters[num].err_struct_info=err_struct_info_conf; + memcpy(parameters[num++].err_data_buffer, + err_data_buffer_conf,ERR_DATA_BUFFER_SIZE*8) ; + + if (num>=MAX_TASK_NUM) + break; + } + } + else { + parameters[0].cpu=line_para.cpu; + parameters[0].loop=line_para.loop; + parameters[0].interval= line_para.interval>MIN_INTERVAL + ?line_para.interval:MIN_INTERVAL; + parameters[0].err_type_info=line_para.err_type_info; + parameters[0].err_struct_info=line_para.err_struct_info; + memcpy(parameters[0].err_data_buffer, + line_para.err_data_buffer,ERR_DATA_BUFFER_SIZE*8) ; + + num=1; + } + + /* Create semaphore: If one_lock, one semaphore for all processors. + Otherwise, one sempaphore for each processor. */ + if (one_lock) { + if (create_sem(0)) { + printf("Can not create semaphore...exit\n"); + free_sem(0); + return -1; + } + } + else { + for (i=0;i Date: Fri, 8 Dec 2006 16:15:16 -0800 Subject: [IA64] Itanium MC Error Injection Tool: Driver sysfs interface This kernel driver patch provides sysfs interface for user application to call pal_mc_error_inject() procedure. Signed-off-by: Fenghua Yu Signed-off-by: Tony Luck diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c new file mode 100644 index 0000000..d3e9f33 --- /dev/null +++ b/arch/ia64/kernel/err_inject.c @@ -0,0 +1,293 @@ +/* + * err_inject.c - + * 1.) Inject errors to a processor. + * 2.) Query error injection capabilities. + * This driver along with user space code can be acting as an error + * injection tool. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Written by: Fenghua Yu , Intel Corporation + * Copyright (C) 2006, Intel Corp. All rights reserved. + * + */ +#include +#include +#include +#include +#include + +#define ERR_INJ_DEBUG + +#define ERR_DATA_BUFFER_SIZE 3 // Three 8-byte; + +#define define_one_ro(name) \ +static SYSDEV_ATTR(name, 0444, show_##name, NULL) + +#define define_one_rw(name) \ +static SYSDEV_ATTR(name, 0644, show_##name, store_##name) + +static u64 call_start[NR_CPUS]; +static u64 phys_addr[NR_CPUS]; +static u64 err_type_info[NR_CPUS]; +static u64 err_struct_info[NR_CPUS]; +static struct { + u64 data1; + u64 data2; + u64 data3; +} __attribute__((__aligned__(16))) err_data_buffer[NR_CPUS]; +static s64 status[NR_CPUS]; +static u64 capabilities[NR_CPUS]; +static u64 resources[NR_CPUS]; + +#define show(name) \ +static ssize_t \ +show_##name(struct sys_device *dev, char *buf) \ +{ \ + u32 cpu=dev->id; \ + return sprintf(buf, "%lx\n", name[cpu]); \ +} + +#define store(name) \ +static ssize_t \ +store_##name(struct sys_device *dev, const char *buf, size_t size) \ +{ \ + unsigned int cpu=dev->id; \ + name[cpu] = simple_strtoull(buf, NULL, 16); \ + return size; \ +} + +show(call_start) + +/* It's user's responsibility to call the PAL procedure on a specific + * processor. The cpu number in driver is only used for storing data. + */ +static ssize_t +store_call_start(struct sys_device *dev, const char *buf, size_t size) +{ + unsigned int cpu=dev->id; + unsigned long call_start = simple_strtoull(buf, NULL, 16); + +#ifdef ERR_INJ_DEBUG + printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu); + printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]); + printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]); + printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n", + err_data_buffer[cpu].data1, + err_data_buffer[cpu].data2, + err_data_buffer[cpu].data3); +#endif + switch (call_start) { + case 0: /* Do nothing. */ + break; + case 1: /* Call pal_mc_error_inject in physical mode. */ + status[cpu]=ia64_pal_mc_error_inject_phys(err_type_info[cpu], + err_struct_info[cpu], + ia64_tpa(&err_data_buffer[cpu]), + &capabilities[cpu], + &resources[cpu]); + break; + case 2: /* Call pal_mc_error_inject in virtual mode. 
*/ + status[cpu]=ia64_pal_mc_error_inject_virt(err_type_info[cpu], + err_struct_info[cpu], + ia64_tpa(&err_data_buffer[cpu]), + &capabilities[cpu], + &resources[cpu]); + break; + default: + status[cpu] = -EINVAL; + break; + } + +#ifdef ERR_INJ_DEBUG + printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]); + printk(KERN_DEBUG "capapbilities=%lx,\n", capabilities[cpu]); + printk(KERN_DEBUG "resources=%lx\n", resources[cpu]); +#endif + return size; +} + +show(err_type_info) +store(err_type_info) + +static ssize_t +show_virtual_to_phys(struct sys_device *dev, char *buf) +{ + unsigned int cpu=dev->id; + return sprintf(buf, "%lx\n", phys_addr[cpu]); +} + +static ssize_t +store_virtual_to_phys(struct sys_device *dev, const char *buf, size_t size) +{ + unsigned int cpu=dev->id; + u64 virt_addr=simple_strtoull(buf, NULL, 16); + int ret; + + ret = get_user_pages(current, current->mm, virt_addr, + 1, VM_READ, 0, NULL, NULL); + if (ret<=0) { +#ifdef ERR_INJ_DEBUG + printk("Virtual address %lx is not existing.\n",virt_addr); +#endif + return -EINVAL; + } + + phys_addr[cpu] = ia64_tpa(virt_addr); + return size; +} + +show(err_struct_info) +store(err_struct_info) + +static ssize_t +show_err_data_buffer(struct sys_device *dev, char *buf) +{ + unsigned int cpu=dev->id; + + return sprintf(buf, "%lx, %lx, %lx\n", + err_data_buffer[cpu].data1, + err_data_buffer[cpu].data2, + err_data_buffer[cpu].data3); +} + +static ssize_t +store_err_data_buffer(struct sys_device *dev, const char *buf, size_t size) +{ + unsigned int cpu=dev->id; + int ret; + +#ifdef ERR_INJ_DEBUG + printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n", + err_data_buffer[cpu].data1, + err_data_buffer[cpu].data2, + err_data_buffer[cpu].data3, + cpu); +#endif + ret=sscanf(buf, "%lx, %lx, %lx", + &err_data_buffer[cpu].data1, + &err_data_buffer[cpu].data2, + &err_data_buffer[cpu].data3); + if (ret!=ERR_DATA_BUFFER_SIZE) + return -EINVAL; + + return size; +} + +show(status) +show(capabilities) +show(resources) + +define_one_rw(call_start); +define_one_rw(err_type_info); +define_one_rw(err_struct_info); +define_one_rw(err_data_buffer); +define_one_rw(virtual_to_phys); +define_one_ro(status); +define_one_ro(capabilities); +define_one_ro(resources); + +static struct attribute *default_attrs[] = { + &attr_call_start.attr, + &attr_virtual_to_phys.attr, + &attr_err_type_info.attr, + &attr_err_struct_info.attr, + &attr_err_data_buffer.attr, + &attr_status.attr, + &attr_capabilities.attr, + &attr_resources.attr, + NULL +}; + +static struct attribute_group err_inject_attr_group = { + .attrs = default_attrs, + .name = "err_inject" +}; +/* Add/Remove err_inject interface for CPU device */ +static int __cpuinit err_inject_add_dev(struct sys_device * sys_dev) +{ + return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group); +} + +static int __cpuinit err_inject_remove_dev(struct sys_device * sys_dev) +{ + sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group); + return 0; +} +static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + struct sys_device *sys_dev; + + sys_dev = get_cpu_sysdev(cpu); + switch (action) { + case CPU_ONLINE: + err_inject_add_dev(sys_dev); + break; + case CPU_DEAD: + err_inject_remove_dev(sys_dev); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata err_inject_cpu_notifier = +{ + .notifier_call = err_inject_cpu_callback, +}; + +static int __init +err_inject_init(void) +{ + int i; + +#ifdef 
ERR_INJ_DEBUG + printk(KERN_INFO "Enter error injection driver.\n"); +#endif + for_each_online_cpu(i) { + err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE, + (void *)(long)i); + } + + register_hotcpu_notifier(&err_inject_cpu_notifier); + + return 0; +} + +static void __exit +err_inject_exit(void) +{ + int i; + struct sys_device *sys_dev; + +#ifdef ERR_INJ_DEBUG + printk(KERN_INFO "Exit error injection driver.\n"); +#endif + for_each_online_cpu(i) { + sys_dev = get_cpu_sysdev(i); + sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group); + } + unregister_hotcpu_notifier(&err_inject_cpu_notifier); +} + +module_init(err_inject_init); +module_exit(err_inject_exit); + +MODULE_AUTHOR("Fenghua Yu "); +MODULE_DESCRIPTION("MC error injection kenrel sysfs interface"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From 539d517ad10bbaac2c04e0ee22916a360c5bcc0d Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Fri, 8 Dec 2006 16:16:24 -0800 Subject: [IA64] Itanium MC Error Injection Tool: Makefile changes This patch has Makefile changes. Signed-off-by: Fenghua Yu Signed-off-by: Tony Luck diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 098ee60..33e5a59 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o obj-$(CONFIG_AUDIT) += audit.o obj-$(CONFIG_PCI_MSI) += msi_ia64.o mca_recovery-y += mca_drv.o mca_drv_asm.o +obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o obj-$(CONFIG_IA64_ESI) += esi.o ifneq ($(CONFIG_IA64_ESI),) -- cgit v0.10.2 From 1138b7e2d40711b024768034beb64885994271e4 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Fri, 8 Dec 2006 16:17:31 -0800 Subject: [IA64] Itanium MC Error Injection Tool: pal_mc_error_inject() interface This patch implements pal_mc_error_inject() interface in kernel. Both physical mode and virtual mode are supported. Signed-off-by: Fenghua Yu Signed-off-by: Tony Luck diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h index bc76815..56e9210 100644 --- a/include/asm-ia64/pal.h +++ b/include/asm-ia64/pal.h @@ -89,6 +89,8 @@ #define PAL_GET_PSTATE_TYPE_AVGNORESET 2 #define PAL_GET_PSTATE_TYPE_INSTANT 3 +#define PAL_MC_ERROR_INJECT 276 /* Injects processor error or returns injection capabilities */ + #ifndef __ASSEMBLY__ #include @@ -1234,6 +1236,37 @@ ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_in return iprv.status; } +/* Injects the requested processor error or returns info on + * supported injection capabilities for current processor implementation + */ +static inline s64 +ia64_pal_mc_error_inject_phys (u64 err_type_info, u64 err_struct_info, + u64 err_data_buffer, u64 *capabilities, u64 *resources) +{ + struct ia64_pal_retval iprv; + PAL_CALL_PHYS_STK(iprv, PAL_MC_ERROR_INJECT, err_type_info, + err_struct_info, err_data_buffer); + if (capabilities) + *capabilities= iprv.v0; + if (resources) + *resources= iprv.v1; + return iprv.status; +} + +static inline s64 +ia64_pal_mc_error_inject_virt (u64 err_type_info, u64 err_struct_info, + u64 err_data_buffer, u64 *capabilities, u64 *resources) +{ + struct ia64_pal_retval iprv; + PAL_CALL_STK(iprv, PAL_MC_ERROR_INJECT, err_type_info, + err_struct_info, err_data_buffer); + if (capabilities) + *capabilities= iprv.v0; + if (resources) + *resources= iprv.v1; + return iprv.status; +} + /* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK willnot * attempt to correct any expected machine checks. 
*/ -- cgit v0.10.2 From e1b43bd556a611584a65f529e5077c1b54ace4f7 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Mon, 5 Feb 2007 15:47:43 -0800 Subject: [IA64] Fix example error injection program Progam accessed using /sys/devices/system/node/node0/cpu%d/err_inject/ This path only exists for CONFIG_NUMA=y systems. Better to use /sys/devices/system/cpu/cpu%d/err_inject/ which is available on all systems. Signed-off-by: Tony Luck diff --git a/Documentation/ia64/err_inject.txt b/Documentation/ia64/err_inject.txt index 26487c1..6449a70 100644 --- a/Documentation/ia64/err_inject.txt +++ b/Documentation/ia64/err_inject.txt @@ -111,7 +111,7 @@ err_injection_tool.c: #define ERR_DATA_BUFFER_SIZE 3 // Three 8-byte. #define PARA_FIELD_NUM 5 #define MASK_SIZE (NR_CPUS/64) -#define PATH_FORMAT "/sys/devices/system/node/node0/cpu%d/err_inject/" +#define PATH_FORMAT "/sys/devices/system/cpu/cpu%d/err_inject/" int sched_setaffinity(pid_t pid, unsigned int len, unsigned long *mask); -- cgit v0.10.2 From a0776ec8e97bf109e7d973d09fc3e1814eb32bfb Mon Sep 17 00:00:00 2001 From: "Chen, Kenneth W" Date: Fri, 13 Oct 2006 10:05:45 -0700 Subject: [IA64] remove per-cpu ia64_phys_stacked_size_p8 It's not efficient to use a per-cpu variable just to store how many physical stack register a cpu has. Ever since the incarnation of ia64 up till upcoming Montecito processor, that variable has "glued" to 96. Having a variable in memory means that the kernel is burning an extra cacheline access on every syscall and kernel exit path. Such "static" value is better served with the instruction patching utility exists today. Convert ia64_phys_stacked_size_p8 into dynamic insn patching. This also has a pleasant side effect of eliminating access to per-cpu area while psr.ic=0 in the kernel exit path. (fixable for per-cpu DTC work, but why bother?) There are some concerns with the default value that the instruc- tion encoded in the kernel image. It shouldn't be concerned. The reasons are: (1) cpu_init() is called at CPU initialization. In there, we find out physical stack register size from PAL and patch two instructions in kernel exit code. The code in question can not be executed before the patching is done. (2) current implementation stores zero in ia64_phys_stacked_size_p8, and that's what the current kernel exit path loads the value with. With the new code, it is equivalent that we store reg size 96 in ia64_phys_stacked_size_p8, thus creating a better safety net. Given (1) above can never fail, having (2) is just a bonus. All in all, this patch allow one less memory reference in the kernel exit path, thus reducing syscall and interrupt return latency; and avoid polluting potential useful data in the CPU cache. 
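To make the mechanism concrete, the following stand-alone sketch (an illustration added for this log, not code from the patch) reproduces the mask/immediate computation that ia64_patch_phys_stack_reg() below applies at every entry in the .data.patch.phys_stack_reg list, using the 96-register default discussed above:

#include <stdio.h>

int main(void)
{
	unsigned long long num_phys_stacked = 96;	/* value PAL reports */
	unsigned long long val  = num_phys_stacked * 8 + 8;
	/* A4 format "adds r1 = imm13, r3": a 7-bit field at bit 13 and a
	 * 6-bit field at bit 27, matching the mask/imm lines in patch.c. */
	unsigned long long mask = (0x3fULL << 27) | (0x7fULL << 13);
	unsigned long long imm  = (((val >> 7) & 0x3f) << 27)
				  | ((val & 0x7f) << 13);

	/* For 96 registers: val = 776 (0x308), low 7 bits = 0x08,
	 * upper 6 bits = 0x6, so imm = 0x30010000. */
	printf("val=%#llx mask=%#llx imm=%#llx\n", val, mask, imm);
	return 0;
}

Note that cpu_init() only calls the patch routine when PAL reports more stacked registers than the value already encoded in the image, so in the common 96-register case the kernel text is left untouched.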
Signed-off-by: Ken Chen Signed-off-by: Tony Luck diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 15234ed..ac4b304 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -767,7 +767,7 @@ ENTRY(ia64_leave_syscall) ld8.fill r15=[r3] // M0|1 restore r15 mov b6=r18 // I0 restore b6 - addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A + LOAD_PHYS_STACK_REG_SIZE(r17) mov f9=f0 // F clear f9 (pKStk) br.cond.dpnt.many skip_rbs_switch // B @@ -775,7 +775,6 @@ ENTRY(ia64_leave_syscall) shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition cover // B add current frame into dirty partition & set cr.ifs ;; -(pUStk) ld4 r17=[r17] // M0|1 r17 = cpu_data->phys_stacked_size_p8 mov r19=ar.bsp // M2 get new backing store pointer mov f10=f0 // F clear f10 @@ -953,9 +952,7 @@ GLOBAL_ENTRY(ia64_leave_kernel) shr.u r18=r19,16 // get byte size of existing "dirty" partition ;; mov r16=ar.bsp // get existing backing store pointer - addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 - ;; - ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8 + LOAD_PHYS_STACK_REG_SIZE(r17) (pKStk) br.cond.dpnt skip_rbs_switch /* diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c index bc11bb0..e796e29 100644 --- a/arch/ia64/kernel/patch.c +++ b/arch/ia64/kernel/patch.c @@ -195,3 +195,23 @@ ia64_patch_gate (void) ia64_patch_vtop(START(vtop), END(vtop)); ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9)); } + +void ia64_patch_phys_stack_reg(unsigned long val) +{ + s32 * offp = (s32 *) __start___phys_stack_reg_patchlist; + s32 * end = (s32 *) __end___phys_stack_reg_patchlist; + u64 ip, mask, imm; + + /* see instruction format A4: adds r1 = imm13, r3 */ + mask = (0x3fUL << 27) | (0x7f << 13); + imm = (((val >> 7) & 0x3f) << 27) | (val & 0x7f) << 13; + + while (offp < end) { + ip = (u64) offp + *offp; + ia64_patch(ip, mask, imm); + ia64_fc(ip); + ++offp; + } + ia64_sync_i(); + ia64_srlz_i(); +} diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index ad567b8d..f167b89 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -75,7 +75,6 @@ extern void ia64_setup_printk_clock(void); DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info); DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); -DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8); unsigned long ia64_cycles_per_usec; struct ia64_boot_param *ia64_boot_param; struct screen_info screen_info; @@ -836,6 +835,7 @@ void __cpuinit cpu_init (void) { extern void __cpuinit ia64_mmu_init (void *); + static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG; unsigned long num_phys_stacked; pal_vm_info_2_u_t vmi; unsigned int max_ctx; @@ -949,7 +949,10 @@ cpu_init (void) num_phys_stacked = 96; } /* size of physical stacked register partition plus 8 bytes: */ - __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8; + if (num_phys_stacked > max_num_phys_stacked) { + ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8); + max_num_phys_stacked = num_phys_stacked; + } platform_cpu_init(); pm_idle = default_idle; } diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index d6083a0..d9599dc 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -78,6 +78,13 @@ SECTIONS __stop___mca_table = .; } + .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET) + { + __start___phys_stack_reg_patchlist = .; + *(.data.patch.phys_stack_reg) + __end___phys_stack_reg_patchlist = .; + } + /* Global data */ _data = .; 
diff --git a/include/asm-ia64/asmmacro.h b/include/asm-ia64/asmmacro.h index c22b465..c1642fd 100644 --- a/include/asm-ia64/asmmacro.h +++ b/include/asm-ia64/asmmacro.h @@ -104,6 +104,16 @@ name: #endif /* + * If physical stack register size is different from DEF_NUM_STACK_REG, + * dynamically patch the kernel for correct size. + */ + .section ".data.patch.phys_stack_reg", "a" + .previous +#define LOAD_PHYS_STACK_REG_SIZE(reg) \ +[1:] adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0; \ + .xdata4 ".data.patch.phys_stack_reg", 1b-. + +/* * Up until early 2004, use of .align within a function caused bad unwind info. * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing * otherwise. diff --git a/include/asm-ia64/patch.h b/include/asm-ia64/patch.h index 4797f35..a715430 100644 --- a/include/asm-ia64/patch.h +++ b/include/asm-ia64/patch.h @@ -20,6 +20,7 @@ extern void ia64_patch_imm60 (u64 insn_addr, u64 val); /* patch "brl" w/ip-rel extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end); extern void ia64_patch_vtop (unsigned long start, unsigned long end); +extern void ia64_patch_phys_stack_reg(unsigned long val); extern void ia64_patch_gate (void); #endif /* _ASM_IA64_PATCH_H */ diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index 5830d36..88c728b 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h @@ -19,6 +19,7 @@ #include #include +#define IA64_NUM_PHYS_STACK_REG 96 #define IA64_NUM_DBG_REGS 8 #define DEFAULT_MAP_BASE __IA64_UL_CONST(0x2000000000000000) diff --git a/include/asm-ia64/sections.h b/include/asm-ia64/sections.h index e9eb7f6..dc42a35 100644 --- a/include/asm-ia64/sections.h +++ b/include/asm-ia64/sections.h @@ -11,6 +11,7 @@ extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[]; extern char __start___vtop_patchlist[], __end___vtop_patchlist[]; extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[]; +extern char __start___phys_stack_reg_patchlist[], __end___phys_stack_reg_patchlist[]; extern char __start_gate_section[]; extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[]; extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[]; -- cgit v0.10.2 From 00b65985fb2fc542b855b03fcda0d0f2bab4f442 Mon Sep 17 00:00:00 2001 From: "Chen, Kenneth W" Date: Fri, 13 Oct 2006 10:08:13 -0700 Subject: [IA64] relax per-cpu TLB requirement to DTC Instead of pinning per-cpu TLB into a DTR, use DTC. This will free up one TLB entry for application, or even kernel if access pattern to per-cpu data area has high temporal locality. Since per-cpu is mapped at the top of region 7 address, we just need to add special case in alt_dtlb_miss. The physical address of per-cpu data is already conveniently stored in IA64_KR(PER_CPU_DATA). Latency for alt_dtlb_miss is not affected as we can hide all the latency. It was measured that alt_dtlb_miss handler has 23 cycles latency before and after the patch. The performance effect is massive for applications that put lots of tlb pressure on CPU. Workload environment like database online transaction processing or application uses tera-byte of memory would benefit the most. Measurement with industry standard database benchmark shown an upward of 1.6% gain. While smaller workloads like cpu, java also showing small improvement. 
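For readers who prefer C to predicated assembly, here is a model of the decision the patched alt_dtlb_miss handler makes (an illustration added for this log, not code from the patch; the PERCPU_* and IA64_MAX_PHYS_BITS values are assumptions taken from ia64 headers of this era, not stated in the diff itself):

typedef unsigned long long u64;

#define PERCPU_PAGE_SHIFT	16			/* assumed 64KB per-CPU page */
#define PERCPU_PAGE_SIZE	(1ULL << PERCPU_PAGE_SHIFT)
#define PERCPU_ADDR		(0ULL - PERCPU_PAGE_SIZE)	/* top of region 7 */
#define IA64_MAX_PHYS_BITS	50			/* assumed */
#define MA_UC			(1ULL << 4)		/* bit 4: ma=UC, as the dep sets */

struct dtc_insert {
	u64 pte;	/* value handed to itc.d */
	u64 itir;	/* translation size; 0 = keep the default */
};

struct dtc_insert alt_dtlb_model(u64 ifa, u64 pte_ctl_bits,
				 u64 per_cpu_phys /* IA64_KR(PER_CPU_DATA) */)
{
	struct dtc_insert ins = { 0, 0 };

	if (ifa >= PERCPU_ADDR) {
		/* p10: fault in the per-CPU page.  No pinned DTR any more;
		 * rebuild the PTE from the physical base the kernel keeps in
		 * IA64_KR(PER_CPU_DATA), sized PERCPU_PAGE_SHIFT. */
		ins.pte  = per_cpu_phys - PERCPU_PAGE_SIZE;	/* as in "sub r19=r19,r26" */
		ins.itir = PERCPU_PAGE_SHIFT << 2;
	} else {
		/* p11: usual identity mapping, keep only the ppn field. */
		ins.pte = ifa & (((1ULL << IA64_MAX_PHYS_BITS) - 1) & ~0xfffULL);
	}
	if (!(ifa & (1ULL << 61)))
		/* p12: region 6 access, force the uncacheable attribute. */
		pte_ctl_bits |= MA_UC;

	ins.pte |= pte_ctl_bits;	/* "or r19=r19,r17", then itc.d */
	return ins;
}

The trade visible here is one extra compare and a predicated move in the alternate miss handler in exchange for freeing a DTR, which is also why the PERCPU purge/reload code in mca_asm.S and the pinned mapping in ia64_mmu_init() can be deleted.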
Signed-off-by: Ken Chen Signed-off-by: Tony Luck diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index 6b7fcbd..34f44d8 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S @@ -374,6 +374,7 @@ ENTRY(alt_dtlb_miss) movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) mov r21=cr.ipsr mov r31=pr + mov r24=PERCPU_ADDR ;; #ifdef CONFIG_DISABLE_VHPT shr.u r22=r16,61 // get the region number into r21 @@ -386,22 +387,30 @@ ENTRY(alt_dtlb_miss) (p8) mov r29=b0 // save b0 (p8) br.cond.dptk dtlb_fault #endif + cmp.ge p10,p11=r16,r24 // access to per_cpu_data? + tbit.z p12,p0=r16,61 // access to region 6? + mov r25=PERCPU_PAGE_SHIFT << 2 + mov r26=PERCPU_PAGE_SIZE + nop.m 0 + nop.b 0 + ;; +(p10) mov r19=IA64_KR(PER_CPU_DATA) +(p11) and r19=r19,r16 // clear non-ppn fields extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on? - shr.u r18=r16,57 // move address bit 61 to bit 4 - and r19=r19,r16 // clear ed, reserved bits, and PTE control bits tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? ;; - andcm r18=0x10,r18 // bit 4=~address-bit(61) +(p10) sub r19=r19,r26 +(p10) mov cr.itir=r25 cmp.ne p8,p0=r0,r23 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field +(p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr (p8) br.cond.spnt page_fault dep r21=-1,r21,IA64_PSR_ED_BIT,1 - or r19=r19,r17 // insert PTE control bits into r19 ;; - or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 + or r19=r19,r17 // insert PTE control bits into r19 (p6) mov cr.ipsr=r21 ;; (p7) itc.d r19 // insert the TLB entry diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S index c6b607c..8c9c26a 100644 --- a/arch/ia64/kernel/mca_asm.S +++ b/arch/ia64/kernel/mca_asm.S @@ -101,14 +101,6 @@ ia64_do_tlb_purge: ;; srlz.d ;; - // 2. Purge DTR for PERCPU data. - movl r16=PERCPU_ADDR - mov r18=PERCPU_PAGE_SHIFT<<2 - ;; - ptr.d r16,r18 - ;; - srlz.d - ;; // 3. Purge ITR for PAL code. GET_THIS_PADDR(r2, ia64_mca_pal_base) ;; @@ -196,22 +188,6 @@ ia64_reload_tr: srlz.i srlz.d ;; - // 2. Reload DTR register for PERCPU data. - GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte) - ;; - movl r16=PERCPU_ADDR // vaddr - movl r18=PERCPU_PAGE_SHIFT<<2 - ;; - mov cr.itir=r18 - mov cr.ifa=r16 - ;; - ld8 r18=[r2] // load per-CPU PTE - mov r16=IA64_TR_PERCPU_DATA; - ;; - itr.d dtr[r16]=r18 - ;; - srlz.d - ;; // 3. Reload ITR for PAL code. GET_THIS_PADDR(r2, ia64_mca_pal_pte) ;; diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 1373fae..07d82cd 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -337,7 +337,7 @@ setup_gate (void) void __devinit ia64_mmu_init (void *my_cpu_data) { - unsigned long psr, pta, impl_va_bits; + unsigned long pta, impl_va_bits; extern void __devinit tlb_init (void); #ifdef CONFIG_DISABLE_VHPT @@ -346,15 +346,6 @@ ia64_mmu_init (void *my_cpu_data) # define VHPT_ENABLE_BIT 1 #endif - /* Pin mapping for percpu area into TLB */ - psr = ia64_clear_ic(); - ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR, - pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)), - PERCPU_PAGE_SHIFT); - - ia64_set_psr(psr); - ia64_srlz_i(); - /* * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped * address space. 
The IA-64 architecture guarantees that at least 50 bits of diff --git a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h index 221b5cb..7e55a58 100644 --- a/include/asm-ia64/kregs.h +++ b/include/asm-ia64/kregs.h @@ -29,8 +29,7 @@ */ #define IA64_TR_KERNEL 0 /* itr0, dtr0: maps kernel image (code & data) */ #define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */ -#define IA64_TR_PERCPU_DATA 1 /* dtr1: percpu data */ -#define IA64_TR_CURRENT_STACK 2 /* dtr2: maps kernel's memory- & register-stacks */ +#define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */ /* Processor status register bits: */ #define IA64_PSR_BE_BIT 1 -- cgit v0.10.2 From 24bf10ab2d72863a14187905fd992ca8119c809e Mon Sep 17 00:00:00 2001 From: Stefan Lucke Date: Sun, 18 Feb 2007 01:49:10 -0500 Subject: Input: psmouse - add support for eGalax PS/2 touchscreen controller Based on the touchkit USB and lifebook PS/2 touchscreen driver. The egalax touchsreen controller (PS/2 or USB version) is used in this 7" device: http://www.cartft.com/catalog/il/449 Signed-off-by: Michal Piotrowski Signed-off-by: Andrew Morton Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile index 21a1de6..6751179 100644 --- a/drivers/input/mouse/Makefile +++ b/drivers/input/mouse/Makefile @@ -14,4 +14,5 @@ obj-$(CONFIG_MOUSE_SERIAL) += sermouse.o obj-$(CONFIG_MOUSE_HIL) += hil_ptr.o obj-$(CONFIG_MOUSE_VSXXXAA) += vsxxxaa.o -psmouse-objs := psmouse-base.o alps.o logips2pp.o synaptics.o lifebook.o trackpoint.o +psmouse-objs := psmouse-base.o alps.o logips2pp.o synaptics.o lifebook.o \ + trackpoint.o touchkit_ps2.o diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 0fe5869..eb63855 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -28,6 +28,7 @@ #include "alps.h" #include "lifebook.h" #include "trackpoint.h" +#include "touchkit_ps2.h" #define DRIVER_DESC "PS/2 mouse driver" @@ -605,14 +606,20 @@ static int psmouse_extensions(struct psmouse *psmouse, } } - if (max_proto > PSMOUSE_IMEX && genius_detect(psmouse, set_properties) == 0) - return PSMOUSE_GENPS; + if (max_proto > PSMOUSE_IMEX) { + + if (genius_detect(psmouse, set_properties) == 0) + return PSMOUSE_GENPS; + + if (ps2pp_init(psmouse, set_properties) == 0) + return PSMOUSE_PS2PP; - if (max_proto > PSMOUSE_IMEX && ps2pp_init(psmouse, set_properties) == 0) - return PSMOUSE_PS2PP; + if (trackpoint_detect(psmouse, set_properties) == 0) + return PSMOUSE_TRACKPOINT; - if (max_proto > PSMOUSE_IMEX && trackpoint_detect(psmouse, set_properties) == 0) - return PSMOUSE_TRACKPOINT; + if (touchkit_ps2_detect(psmouse, set_properties) == 0) + return PSMOUSE_TOUCHKIT_PS2; + } /* * Reset to defaults in case the device got confused by extended @@ -713,6 +720,12 @@ static const struct psmouse_protocol psmouse_protocols[] = { .detect = trackpoint_detect, }, { + .type = PSMOUSE_TOUCHKIT_PS2, + .name = "touchkitPS/2", + .alias = "touchkit", + .detect = touchkit_ps2_detect, + }, + { .type = PSMOUSE_AUTO, .name = "auto", .alias = "any", diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h index cf1de95..3964e8a 100644 --- a/drivers/input/mouse/psmouse.h +++ b/drivers/input/mouse/psmouse.h @@ -87,6 +87,7 @@ enum psmouse_type { PSMOUSE_ALPS, PSMOUSE_LIFEBOOK, PSMOUSE_TRACKPOINT, + PSMOUSE_TOUCHKIT_PS2, PSMOUSE_AUTO /* This one should always be last */ }; diff --git a/drivers/input/mouse/touchkit_ps2.c 
b/drivers/input/mouse/touchkit_ps2.c new file mode 100644 index 0000000..7b977fd --- /dev/null +++ b/drivers/input/mouse/touchkit_ps2.c @@ -0,0 +1,100 @@ +/* ---------------------------------------------------------------------------- + * touchkit_ps2.c -- Driver for eGalax TouchKit PS/2 Touchscreens + * + * Copyright (C) 2005 by Stefan Lucke + * Copyright (C) 2004 by Daniel Ritz + * Copyright (C) by Todd E. Johnson (mtouchusb.c) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Based upon touchkitusb.c + * + * Vendor documentation is available in support section of: + * http://www.egalax.com.tw/ + */ + +#include +#include + +#include +#include +#include + +#include "psmouse.h" +#include "touchkit_ps2.h" + +#define TOUCHKIT_MAX_XC 0x07ff +#define TOUCHKIT_MAX_YC 0x07ff + +#define TOUCHKIT_CMD 0x0a +#define TOUCHKIT_CMD_LENGTH 1 + +#define TOUCHKIT_CMD_ACTIVE 'A' +#define TOUCHKIT_CMD_FIRMWARE_VERSION 'D' +#define TOUCHKIT_CMD_CONTROLLER_TYPE 'E' + +#define TOUCHKIT_SEND_PARMS(s, r, c) ((s) << 12 | (r) << 8 | (c)) + +#define TOUCHKIT_GET_TOUCHED(packet) (((packet)[0]) & 0x01) +#define TOUCHKIT_GET_X(packet) (((packet)[1] << 7) | (packet)[2]) +#define TOUCHKIT_GET_Y(packet) (((packet)[3] << 7) | (packet)[4]) + +static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse) +{ + unsigned char *packet = psmouse->packet; + struct input_dev *dev = psmouse->dev; + + if (psmouse->pktcnt != 5) + return PSMOUSE_GOOD_DATA; + + input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet)); + input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet)); + input_report_key(dev, BTN_TOUCH, TOUCHKIT_GET_TOUCHED(packet)); + input_sync(dev); + + return PSMOUSE_FULL_PACKET; +} + +int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties) +{ + struct input_dev *dev = psmouse->dev; + unsigned char param[3]; + int command; + + param[0] = TOUCHKIT_CMD_LENGTH; + param[1] = TOUCHKIT_CMD_ACTIVE; + command = TOUCHKIT_SEND_PARMS(2, 3, TOUCHKIT_CMD); + + if (ps2_command(&psmouse->ps2dev, param, command)) + return -ENODEV; + + if (param[0] != TOUCHKIT_CMD || param[1] != 0x01 || + param[2] != TOUCHKIT_CMD_ACTIVE) + return -ENODEV; + + if (set_properties) { + dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); + set_bit(BTN_TOUCH, dev->keybit); + input_set_abs_params(dev, ABS_X, 0, TOUCHKIT_MAX_XC, 0, 0); + input_set_abs_params(dev, ABS_Y, 0, TOUCHKIT_MAX_YC, 0, 0); + + psmouse->vendor = "eGalax"; + psmouse->name = "Touchscreen"; + psmouse->protocol_handler = touchkit_ps2_process_byte; + psmouse->pktsize = 5; + } + + return 0; +} diff --git a/drivers/input/mouse/touchkit_ps2.h b/drivers/input/mouse/touchkit_ps2.h new file mode 100644 index 0000000..3f03fbc --- /dev/null +++ b/drivers/input/mouse/touchkit_ps2.h @@ -0,0 +1,17 @@ +/* ---------------------------------------------------------------------------- + * touchkit_ps2.h -- Driver for eGalax TouchKit PS/2 
Touchscreens + * + * Copyright (C) 2005 by Stefan Lucke + * Copyright (c) 2005 Vojtech Pavlik + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef _TOUCHKIT_PS2_H +#define _TOUCHKIT_PS2_H + +int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties); + +#endif -- cgit v0.10.2 From bebb8a2bc180a4c920c57e89b2d713a34c1d096c Mon Sep 17 00:00:00 2001 From: Yoichi Yuasa Date: Sun, 18 Feb 2007 01:50:18 -0500 Subject: Input: add driver for MIPS Cobalt back panel buttons Tested on Cobalt Qube2. Signed-off-by: Yoichi Yuasa Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 41b4258..5694115 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -40,6 +40,15 @@ config INPUT_M68K_BEEP tristate "M68k Beeper support" depends on M68K +config INPUT_COBALT_BTNS + tristate "Cobalt button interface" + depends on MIPS_COBALT + help + Say Y here if you want to support MIPS Cobalt button interface. + + To compile this driver as a module, choose M here: the + module will be called cobalt_btns. + config INPUT_WISTRON_BTNS tristate "x86 Wistron laptop button interface" depends on X86 && !X86_64 @@ -82,7 +91,7 @@ config INPUT_UINPUT module will be called uinput. config HP_SDC_RTC - tristate "HP SDC Real Time Clock" + tristate "HP SDC Real Time Clock" depends on GSC || HP300 select HP_SDC help diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index e0a8d58..9f08f27 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o obj-$(CONFIG_INPUT_UINPUT) += uinput.o +obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c new file mode 100644 index 0000000..b14f49e --- /dev/null +++ b/drivers/input/misc/cobalt_btns.c @@ -0,0 +1,209 @@ +/* + * Cobalt button interface driver. + * + * Copyright (C) 2007 Yoichi Yuasa + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include +#include +#include +#include +#include +#include +#include + +#define BUTTONS_POLL_INTERVAL 30 /* msec */ +#define BUTTONS_COUNT_THRESHOLD 3 +#define BUTTONS_STATUS_MASK 0xfe000000 + +struct buttons_dev { + struct input_dev *input; + void __iomem *reg; +}; + +struct buttons_map { + uint32_t mask; + int keycode; + int count; +}; + +static struct buttons_map buttons_map[] = { + { 0x02000000, KEY_RESTART, }, + { 0x04000000, KEY_LEFT, }, + { 0x08000000, KEY_UP, }, + { 0x10000000, KEY_DOWN, }, + { 0x20000000, KEY_RIGHT, }, + { 0x40000000, KEY_ENTER, }, + { 0x80000000, KEY_SELECT, }, +}; + +static struct resource cobalt_buttons_resource __initdata = { + .start = 0x1d000000, + .end = 0x1d000003, + .flags = IORESOURCE_MEM, +}; + +static struct platform_device *cobalt_buttons_device; + +static struct timer_list buttons_timer; + +static void handle_buttons(unsigned long data) +{ + struct buttons_map *button = buttons_map; + struct buttons_dev *bdev; + uint32_t status; + int i; + + bdev = (struct buttons_dev *)data; + status = readl(bdev->reg); + status = ~status & BUTTONS_STATUS_MASK; + + for (i = 0; i < ARRAY_SIZE(buttons_map); i++) { + if (status & button->mask) { + button->count++; + } else { + if (button->count >= BUTTONS_COUNT_THRESHOLD) { + input_report_key(bdev->input, button->keycode, 0); + input_sync(bdev->input); + } + button->count = 0; + } + + if (button->count == BUTTONS_COUNT_THRESHOLD) { + input_report_key(bdev->input, button->keycode, 1); + input_sync(bdev->input); + } + + button++; + } + + mod_timer(&buttons_timer, jiffies + msecs_to_jiffies(BUTTONS_POLL_INTERVAL)); +} + +static int cobalt_buttons_open(struct input_dev *dev) +{ + mod_timer(&buttons_timer, jiffies + msecs_to_jiffies(BUTTONS_POLL_INTERVAL)); + + return 0; +} + +static void cobalt_buttons_close(struct input_dev *dev) +{ + del_timer_sync(&buttons_timer); +} + +static int __devinit cobalt_buttons_probe(struct platform_device *pdev) +{ + struct buttons_dev *bdev; + struct input_dev *input; + struct resource *res; + int error, i; + + bdev = kzalloc(sizeof(struct buttons_dev), GFP_KERNEL); + input = input_allocate_device(); + if (!bdev || !input) { + error = -ENOMEM; + goto err_free_mem; + } + + input->name = "Cobalt buttons"; + input->phys = "cobalt/input0"; + input->id.bustype = BUS_HOST; + input->cdev.dev = &pdev->dev; + input->open = cobalt_buttons_open; + input->close = cobalt_buttons_close; + + input->evbit[0] = BIT(EV_KEY); + for (i = 0; i < ARRAY_SIZE(buttons_map); i++) { + set_bit(buttons_map[i].keycode, input->keybit); + buttons_map[i].count = 0; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + error = -EBUSY; + goto err_free_mem; + } + + bdev->input = input; + bdev->reg = ioremap(res->start, res->end - res->start + 1); + dev_set_drvdata(&pdev->dev, bdev); + + setup_timer(&buttons_timer, handle_buttons, (unsigned long)bdev); + + error = input_register_device(input); + if (error) + goto err_iounmap; + + return 0; + + err_iounmap: + iounmap(bdev->reg); + err_free_mem: + input_free_device(input); + kfree(bdev); + dev_set_drvdata(&pdev->dev, NULL); + return error; +} + +static int __devexit cobalt_buttons_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct buttons_dev *bdev = dev_get_drvdata(dev); + + 
input_unregister_device(bdev->input); + iounmap(bdev->reg); + kfree(bdev); + dev_set_drvdata(dev, NULL); + + return 0; +} + +static struct platform_driver cobalt_buttons_driver = { + .probe = cobalt_buttons_probe, + .remove = __devexit_p(cobalt_buttons_remove), + .driver = { + .name = "Cobalt buttons", + .owner = THIS_MODULE, + }, +}; + +static int __init cobalt_buttons_init(void) +{ + int retval; + + cobalt_buttons_device = platform_device_register_simple("Cobalt buttons", -1, + &cobalt_buttons_resource, 1); + if (IS_ERR(cobalt_buttons_device)) { + retval = PTR_ERR(cobalt_buttons_device); + return retval; + } + + retval = platform_driver_register(&cobalt_buttons_driver); + if (retval < 0) + platform_device_unregister(cobalt_buttons_device); + + return retval; +} + +static void __exit cobalt_buttons_exit(void) +{ + platform_driver_unregister(&cobalt_buttons_driver); + platform_device_unregister(cobalt_buttons_device); +} + +module_init(cobalt_buttons_init); +module_exit(cobalt_buttons_exit); -- cgit v0.10.2 From 969111e900226a8dbd1f596f34c09eecd20afc7d Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Wed, 28 Feb 2007 23:51:03 -0500 Subject: Input: ads7846 - add support for the ads7843 touchscreen The ads7843 support has now become almost trivial since the last rework. Signed-off-by: Nicolas Ferre Acked-by: David Brownell Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 0a26e06..2f5fc80 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -39,7 +39,8 @@ /* * This code has been heavily tested on a Nokia 770, and lightly * tested on other ads7846 devices (OSK/Mistral, Lubbock). - * Support for ads7843 and ads7845 has only been stubbed in. + * Support for ads7843 tested on Atmel at91sam926x-EK. + * Support for ads7845 has only been stubbed in. * * IRQ handling needs a workaround because of a shortcoming in handling * edge triggered IRQs on some platforms like the OMAP1/2. These @@ -246,18 +247,16 @@ static int ads7846_read12_ser(struct device *dev, unsigned command) /* REVISIT: take a few more samples, and compare ... */ - /* maybe off internal vREF */ - if (use_internal) { - req->ref_off = REF_OFF; - req->xfer[4].tx_buf = &req->ref_off; - req->xfer[4].len = 1; - spi_message_add_tail(&req->xfer[4], &req->msg); - - req->xfer[5].rx_buf = &req->scratch; - req->xfer[5].len = 2; - CS_CHANGE(req->xfer[5]); - spi_message_add_tail(&req->xfer[5], &req->msg); - } + /* converter in low power mode & enable PENIRQ */ + req->ref_off = PWRDOWN; + req->xfer[4].tx_buf = &req->ref_off; + req->xfer[4].len = 1; + spi_message_add_tail(&req->xfer[4], &req->msg); + + req->xfer[5].rx_buf = &req->scratch; + req->xfer[5].len = 2; + CS_CHANGE(req->xfer[5]); + spi_message_add_tail(&req->xfer[5], &req->msg); ts->irq_disabled = 1; disable_irq(spi->irq); @@ -536,6 +535,9 @@ static void ads7846_rx(void *ads) } else Rt = 0; + if (ts->model == 7843) + Rt = ts->pressure_max / 2; + /* Sample found inconsistent by debouncing or pressure is beyond * the maximum. 
Don't report it to user space, repeat at least * once more the measurement -- cgit v0.10.2 From 3acaf540a33199141695f2e2fcfa8829053159bf Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Wed, 28 Feb 2007 23:51:19 -0500 Subject: Input: HIL - various fixes for HIL drivers - mark some structures const or __read_mostly - hilkbd.c: fix uninitialized spinlock in HIL keyboard driver - hil_mlc.c: use USEC_PER_SEC instead of 1000000 - hp_sdc: bugfix for request_irq()/free_irq() parameters, this prevented multiple load/unload cycles as module Signed-off-by: Helge Deller Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c index 7cc9728..7143f37 100644 --- a/drivers/input/keyboard/hil_kbd.c +++ b/drivers/input/keyboard/hil_kbd.c @@ -51,7 +51,7 @@ MODULE_LICENSE("Dual BSD/GPL"); #define HIL_KBD_SET1_UPBIT 0x01 #define HIL_KBD_SET1_SHIFT 1 -static unsigned int hil_kbd_set1[HIL_KEYCODES_SET1_TBLSIZE] = +static unsigned int hil_kbd_set1[HIL_KEYCODES_SET1_TBLSIZE] __read_mostly = { HIL_KEYCODES_SET1 }; #define HIL_KBD_SET2_UPBIT 0x01 @@ -60,10 +60,10 @@ static unsigned int hil_kbd_set1[HIL_KEYCODES_SET1_TBLSIZE] = #define HIL_KBD_SET3_UPBIT 0x80 #define HIL_KBD_SET3_SHIFT 0 -static unsigned int hil_kbd_set3[HIL_KEYCODES_SET3_TBLSIZE] = +static unsigned int hil_kbd_set3[HIL_KEYCODES_SET3_TBLSIZE] __read_mostly = { HIL_KEYCODES_SET3 }; -static char hil_language[][16] = { HIL_LOCALE_MAP }; +static const char hil_language[][16] = { HIL_LOCALE_MAP }; struct hil_kbd { struct input_dev *dev; @@ -368,7 +368,7 @@ static struct serio_device_id hil_kbd_ids[] = { { 0 } }; -struct serio_driver hil_kbd_serio_drv = { +static struct serio_driver hil_kbd_serio_drv = { .driver = { .name = "hil_kbd", }, diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c index 4de4dc2..230f5db 100644 --- a/drivers/input/keyboard/hilkbd.c +++ b/drivers/input/keyboard/hilkbd.c @@ -3,7 +3,7 @@ * * Copyright (C) 1998 Philip Blundell * Copyright (C) 1999 Matthew Wilcox - * Copyright (C) 1999-2006 Helge Deller + * Copyright (C) 1999-2007 Helge Deller * * Very basic HP Human Interface Loop (HIL) driver. 
* This driver handles the keyboard on HP300 (m68k) and on some @@ -89,7 +89,7 @@ MODULE_LICENSE("GPL v2"); #define HIL_READKBDSADR 0xF9 #define HIL_WRITEKBDSADR 0xE9 -static unsigned int hphilkeyb_keycode[HIL_KEYCODES_SET1_TBLSIZE] = +static unsigned int hphilkeyb_keycode[HIL_KEYCODES_SET1_TBLSIZE] __read_mostly = { HIL_KEYCODES_SET1 }; /* HIL structure */ @@ -211,6 +211,7 @@ hil_keyb_init(void) return -ENODEV; /* already initialized */ } + spin_lock_init(&hil_dev.lock); hil_dev.dev = input_allocate_device(); if (!hil_dev.dev) return -ENOMEM; diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c index 4fa93ff..0710704 100644 --- a/drivers/input/serio/hil_mlc.c +++ b/drivers/input/serio/hil_mlc.c @@ -408,7 +408,7 @@ static int hilse_operate(hil_mlc *mlc, int repoll) { #define OUT_LAST(pack) \ { HILSE_OUT_LAST, { .packet = pack }, 0, 0, 0, 0 }, -struct hilse_node hil_mlc_se[HILSEN_END] = { +const struct hilse_node hil_mlc_se[HILSEN_END] = { /* 0 HILSEN_START */ FUNC(hilse_init_lcv, 0, HILSEN_NEXT, HILSEN_SLEEP, 0) @@ -530,7 +530,7 @@ struct hilse_node hil_mlc_se[HILSEN_END] = { /* 60 HILSEN_END */ }; -static inline void hilse_setup_input(hil_mlc *mlc, struct hilse_node *node) { +static inline void hilse_setup_input(hil_mlc *mlc, const struct hilse_node *node) { switch (node->act) { case HILSE_EXPECT_DISC: @@ -563,21 +563,19 @@ static inline void hilse_setup_input(hil_mlc *mlc, struct hilse_node *node) { #ifdef HIL_MLC_DEBUG static int doze = 0; static int seidx; /* For debug */ -static int kick = 1; #endif static int hilse_donode (hil_mlc *mlc) { - struct hilse_node *node; + const struct hilse_node *node; int nextidx = 0; int sched_long = 0; unsigned long flags; #ifdef HIL_MLC_DEBUG if (mlc->seidx && (mlc->seidx != seidx) && mlc->seidx != 41 && mlc->seidx != 42 && mlc->seidx != 43) { - printk(KERN_DEBUG PREFIX "z%i \n%s {%i}", doze, kick ? "K" : "", mlc->seidx); + printk(KERN_DEBUG PREFIX "z%i \n {%i}", doze, mlc->seidx); doze = 0; } - kick = 0; seidx = mlc->seidx; #endif @@ -588,7 +586,7 @@ static int hilse_donode (hil_mlc *mlc) { hil_packet pack; case HILSE_FUNC: - if (node->object.func == NULL) break; + BUG_ON(node->object.func == NULL); rc = node->object.func(mlc, node->arg); nextidx = (rc > 0) ? node->ugly : ((rc < 0) ? 
node->bad : node->good); @@ -674,10 +672,10 @@ static int hilse_donode (hil_mlc *mlc) { if (!sched_long) goto sched; do_gettimeofday(&tv); - tv.tv_usec += 1000000 * (tv.tv_sec - mlc->instart.tv_sec); + tv.tv_usec += USEC_PER_SEC * (tv.tv_sec - mlc->instart.tv_sec); tv.tv_usec -= mlc->instart.tv_usec; if (tv.tv_usec >= mlc->intimeout) goto sched; - tv.tv_usec = (mlc->intimeout - tv.tv_usec) * HZ / 1000000; + tv.tv_usec = (mlc->intimeout - tv.tv_usec) * HZ / USEC_PER_SEC; if (!tv.tv_usec) goto sched; mod_timer(&hil_mlcs_kicker, jiffies + tv.tv_usec); break; @@ -837,7 +835,7 @@ static void hil_mlc_serio_close(struct serio *serio) { /* TODO wake up interruptable */ } -static struct serio_device_id hil_mlc_serio_id = { +static const struct serio_device_id hil_mlc_serio_id = { .type = SERIO_HIL_MLC, .proto = SERIO_HIL, .extra = SERIO_ANY, @@ -873,6 +871,8 @@ int hil_mlc_register(hil_mlc *mlc) { hil_mlc_copy_di_scratch(mlc, i); mlc_serio = kzalloc(sizeof(*mlc_serio), GFP_KERNEL); mlc->serio[i] = mlc_serio; + snprintf(mlc_serio->name, sizeof(mlc_serio->name)-1, "HIL_SERIO%d", i); + snprintf(mlc_serio->phys, sizeof(mlc_serio->phys)-1, "HIL%d", i); mlc_serio->id = hil_mlc_serio_id; mlc_serio->write = hil_mlc_serio_write; mlc_serio->open = hil_mlc_serio_open; diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c index b57370d..353a8a1 100644 --- a/drivers/input/serio/hp_sdc.c +++ b/drivers/input/serio/hp_sdc.c @@ -748,7 +748,7 @@ void hp_sdc_kicker (unsigned long data) { #if defined(__hppa__) -static struct parisc_device_id hp_sdc_tbl[] = { +static const struct parisc_device_id hp_sdc_tbl[] = { { .hw_type = HPHW_FIO, .hversion_rev = HVERSION_REV_ANY_ID, @@ -817,12 +817,12 @@ static int __init hp_sdc_init(void) #endif errstr = "IRQ not available for"; - if(request_irq(hp_sdc.irq, &hp_sdc_isr, 0, "HP SDC", - (void *) hp_sdc.base_io)) goto err1; + if (request_irq(hp_sdc.irq, &hp_sdc_isr, IRQF_SHARED|IRQF_SAMPLE_RANDOM, + "HP SDC", &hp_sdc)) goto err1; errstr = "NMI not available for"; - if (request_irq(hp_sdc.nmi, &hp_sdc_nmisr, 0, "HP SDC NMI", - (void *) hp_sdc.base_io)) goto err2; + if (request_irq(hp_sdc.nmi, &hp_sdc_nmisr, IRQF_SHARED, + "HP SDC NMI", &hp_sdc)) goto err2; printk(KERN_INFO PREFIX "HP SDC at 0x%p, IRQ %d (NMI IRQ %d)\n", (void *)hp_sdc.base_io, hp_sdc.irq, hp_sdc.nmi); @@ -854,7 +854,7 @@ static int __init hp_sdc_init(void) hp_sdc.dev_err = 0; return 0; err2: - free_irq(hp_sdc.irq, NULL); + free_irq(hp_sdc.irq, &hp_sdc); err1: release_region(hp_sdc.data_io, 2); err0: @@ -898,8 +898,8 @@ static void hp_sdc_exit(void) /* Wait until we know this has been processed by the i8042 */ hp_sdc_spin_ibf(); - free_irq(hp_sdc.nmi, NULL); - free_irq(hp_sdc.irq, NULL); + free_irq(hp_sdc.nmi, &hp_sdc); + free_irq(hp_sdc.irq, &hp_sdc); write_unlock_irq(&hp_sdc.lock); del_timer(&hp_sdc.kicker); diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c index aa4a8a4..1f131ff 100644 --- a/drivers/input/serio/hp_sdc_mlc.c +++ b/drivers/input/serio/hp_sdc_mlc.c @@ -323,11 +323,12 @@ static int __init hp_sdc_mlc_init(void) mlc->in = &hp_sdc_mlc_in; mlc->out = &hp_sdc_mlc_out; + mlc->priv = &hp_sdc_mlc_priv; + if (hil_mlc_register(mlc)) { printk(KERN_WARNING PREFIX "Failed to register MLC structure with hil_mlc\n"); goto err0; } - mlc->priv = &hp_sdc_mlc_priv; if (hp_sdc_request_hil_irq(&hp_sdc_mlc_isr)) { printk(KERN_WARNING PREFIX "Request for raw HIL ISR hook denied\n"); -- cgit v0.10.2 From ffd51f46cdf856c0b453d2828a74d552cc15f881 Mon Sep 17 00:00:00 2001 From: 
Helge Deller Date: Wed, 28 Feb 2007 23:51:29 -0500 Subject: Input: HIL - cleanup coding style Signed-off-by: Helge Deller Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c index 7143f37..18f4d41 100644 --- a/drivers/input/keyboard/hil_kbd.c +++ b/drivers/input/keyboard/hil_kbd.c @@ -94,10 +94,12 @@ static void hil_kbd_process_record(struct hil_kbd *kbd) idx = kbd->idx4/4; p = data[idx - 1]; - if ((p & ~HIL_CMDCT_POL) == - (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_POL)) goto report; - if ((p & ~HIL_CMDCT_RPL) == - (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_RPL)) goto report; + if ((p & ~HIL_CMDCT_POL) == + (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_POL)) + goto report; + if ((p & ~HIL_CMDCT_RPL) == + (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_RPL)) + goto report; /* Not a poll response. See if we are loading config records. */ switch (p & HIL_PKT_DATA_MASK) { @@ -107,27 +109,32 @@ static void hil_kbd_process_record(struct hil_kbd *kbd) for (; i < HIL_KBD_MAX_LENGTH; i++) kbd->idd[i] = 0; break; + case HIL_CMD_RSC: for (i = 0; i < idx; i++) kbd->rsc[i] = kbd->data[i] & HIL_PKT_DATA_MASK; for (; i < HIL_KBD_MAX_LENGTH; i++) kbd->rsc[i] = 0; break; + case HIL_CMD_EXD: for (i = 0; i < idx; i++) kbd->exd[i] = kbd->data[i] & HIL_PKT_DATA_MASK; for (; i < HIL_KBD_MAX_LENGTH; i++) kbd->exd[i] = 0; break; + case HIL_CMD_RNM: for (i = 0; i < idx; i++) kbd->rnm[i] = kbd->data[i] & HIL_PKT_DATA_MASK; for (; i < HIL_KBD_MAX_LENGTH + 1; i++) kbd->rnm[i] = '\0'; break; + default: /* These occur when device isn't present */ - if (p == (HIL_ERR_INT | HIL_PKT_CMD)) break; + if (p == (HIL_ERR_INT | HIL_PKT_CMD)) + break; /* Anything else we'd like to know about. */ printk(KERN_WARNING PREFIX "Device sent unknown record %x\n", p); break; @@ -139,16 +146,19 @@ static void hil_kbd_process_record(struct hil_kbd *kbd) switch (kbd->data[0] & HIL_POL_CHARTYPE_MASK) { case HIL_POL_CHARTYPE_NONE: break; + case HIL_POL_CHARTYPE_ASCII: while (cnt < idx - 1) input_report_key(dev, kbd->data[cnt++] & 0x7f, 1); break; + case HIL_POL_CHARTYPE_RSVD1: case HIL_POL_CHARTYPE_RSVD2: case HIL_POL_CHARTYPE_BINARY: while (cnt < idx - 1) input_report_key(dev, kbd->data[cnt++], 1); break; + case HIL_POL_CHARTYPE_SET1: while (cnt < idx - 1) { unsigned int key; @@ -161,6 +171,7 @@ static void hil_kbd_process_record(struct hil_kbd *kbd) input_report_key(dev, key, !up); } break; + case HIL_POL_CHARTYPE_SET2: while (cnt < idx - 1) { unsigned int key; @@ -173,6 +184,7 @@ static void hil_kbd_process_record(struct hil_kbd *kbd) input_report_key(dev, key, !up); } break; + case HIL_POL_CHARTYPE_SET3: while (cnt < idx - 1) { unsigned int key; @@ -191,42 +203,43 @@ static void hil_kbd_process_record(struct hil_kbd *kbd) up(&kbd->sem); } -static void hil_kbd_process_err(struct hil_kbd *kbd) { +static void hil_kbd_process_err(struct hil_kbd *kbd) +{ printk(KERN_WARNING PREFIX "errored HIL packet\n"); kbd->idx4 = 0; up(&kbd->sem); } -static irqreturn_t hil_kbd_interrupt(struct serio *serio, - unsigned char data, unsigned int flags) +static irqreturn_t hil_kbd_interrupt(struct serio *serio, + unsigned char data, unsigned int flags) { struct hil_kbd *kbd; hil_packet packet; int idx; kbd = serio_get_drvdata(serio); - if (kbd == NULL) { - BUG(); - return IRQ_HANDLED; - } + BUG_ON(kbd == NULL); if (kbd->idx4 >= (HIL_KBD_MAX_LENGTH * sizeof(hil_packet))) { hil_kbd_process_err(kbd); return IRQ_HANDLED; } idx = kbd->idx4/4; - if (!(kbd->idx4 % 4)) kbd->data[idx] = 0; + if (!(kbd->idx4 % 4)) + kbd->data[idx] = 0; packet 
= kbd->data[idx]; packet |= ((hil_packet)data) << ((3 - (kbd->idx4 % 4)) * 8); kbd->data[idx] = packet; /* Records of N 4-byte hil_packets must terminate with a command. */ - if ((++(kbd->idx4)) % 4) return IRQ_HANDLED; + if ((++(kbd->idx4)) % 4) + return IRQ_HANDLED; if ((packet & 0xffff0000) != HIL_ERR_INT) { hil_kbd_process_err(kbd); return IRQ_HANDLED; } - if (packet & HIL_PKT_CMD) hil_kbd_process_record(kbd); + if (packet & HIL_PKT_CMD) + hil_kbd_process_record(kbd); return IRQ_HANDLED; } @@ -235,10 +248,7 @@ static void hil_kbd_disconnect(struct serio *serio) struct hil_kbd *kbd; kbd = serio_get_drvdata(serio); - if (kbd == NULL) { - BUG(); - return; - } + BUG_ON(kbd == NULL); serio_close(serio); input_unregister_device(kbd->dev); @@ -267,34 +277,34 @@ static int hil_kbd_connect(struct serio *serio, struct serio_driver *drv) serio_set_drvdata(serio, kbd); kbd->serio = serio; - init_MUTEX_LOCKED(&(kbd->sem)); + init_MUTEX_LOCKED(&kbd->sem); /* Get device info. MLC driver supplies devid/status/etc. */ serio->write(serio, 0); serio->write(serio, 0); serio->write(serio, HIL_PKT_CMD >> 8); serio->write(serio, HIL_CMD_IDD); - down(&(kbd->sem)); + down(&kbd->sem); serio->write(serio, 0); serio->write(serio, 0); serio->write(serio, HIL_PKT_CMD >> 8); serio->write(serio, HIL_CMD_RSC); - down(&(kbd->sem)); + down(&kbd->sem); serio->write(serio, 0); serio->write(serio, 0); serio->write(serio, HIL_PKT_CMD >> 8); serio->write(serio, HIL_CMD_RNM); - down(&(kbd->sem)); + down(&kbd->sem); serio->write(serio, 0); serio->write(serio, 0); serio->write(serio, HIL_PKT_CMD >> 8); serio->write(serio, HIL_CMD_EXD); - down(&(kbd->sem)); + down(&kbd->sem); - up(&(kbd->sem)); + up(&kbd->sem); did = kbd->idd[0]; idd = kbd->idd + 1; @@ -310,12 +320,11 @@ static int hil_kbd_connect(struct serio *serio, struct serio_driver *drv) goto bail2; } - if(HIL_IDD_NUM_BUTTONS(idd) || HIL_IDD_NUM_AXES_PER_SET(*idd)) { + if (HIL_IDD_NUM_BUTTONS(idd) || HIL_IDD_NUM_AXES_PER_SET(*idd)) { printk(KERN_INFO PREFIX "keyboards only, no combo devices supported.\n"); goto bail2; } - kbd->dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP); kbd->dev->ledbit[0] = BIT(LED_NUML) | BIT(LED_CAPSL) | BIT(LED_SCROLLL); kbd->dev->keycodemax = HIL_KEYCODES_SET1_TBLSIZE; @@ -344,8 +353,8 @@ static int hil_kbd_connect(struct serio *serio, struct serio_driver *drv) serio->write(serio, 0); serio->write(serio, HIL_PKT_CMD >> 8); serio->write(serio, HIL_CMD_EK1); /* Enable Keyswitch Autorepeat 1 */ - down(&(kbd->sem)); - up(&(kbd->sem)); + down(&kbd->sem); + up(&kbd->sem); return 0; bail2: @@ -374,20 +383,20 @@ static struct serio_driver hil_kbd_serio_drv = { }, .description = "HP HIL keyboard driver", .id_table = hil_kbd_ids, - .connect = hil_kbd_connect, - .disconnect = hil_kbd_disconnect, - .interrupt = hil_kbd_interrupt + .connect = hil_kbd_connect, + .disconnect = hil_kbd_disconnect, + .interrupt = hil_kbd_interrupt }; static int __init hil_kbd_init(void) { return serio_register_driver(&hil_kbd_serio_drv); } - + static void __exit hil_kbd_exit(void) { serio_unregister_driver(&hil_kbd_serio_drv); } - + module_init(hil_kbd_init); module_exit(hil_kbd_exit); diff --git a/drivers/input/mouse/hil_ptr.c b/drivers/input/mouse/hil_ptr.c index bfb174f..8e9421a 100644 --- a/drivers/input/mouse/hil_ptr.c +++ b/drivers/input/mouse/hil_ptr.c @@ -88,10 +88,12 @@ static void hil_ptr_process_record(struct hil_ptr *ptr) idx = ptr->idx4/4; p = data[idx - 1]; - if ((p & ~HIL_CMDCT_POL) == - (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_POL)) goto report; - if ((p & ~HIL_CMDCT_RPL) 
== - (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_RPL)) goto report; + if ((p & ~HIL_CMDCT_POL) == + (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_POL)) + goto report; + if ((p & ~HIL_CMDCT_RPL) == + (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_RPL)) + goto report; /* Not a poll response. See if we are loading config records. */ switch (p & HIL_PKT_DATA_MASK) { @@ -101,27 +103,32 @@ static void hil_ptr_process_record(struct hil_ptr *ptr) for (; i < HIL_PTR_MAX_LENGTH; i++) ptr->idd[i] = 0; break; + case HIL_CMD_RSC: for (i = 0; i < idx; i++) ptr->rsc[i] = ptr->data[i] & HIL_PKT_DATA_MASK; for (; i < HIL_PTR_MAX_LENGTH; i++) ptr->rsc[i] = 0; break; + case HIL_CMD_EXD: for (i = 0; i < idx; i++) ptr->exd[i] = ptr->data[i] & HIL_PKT_DATA_MASK; for (; i < HIL_PTR_MAX_LENGTH; i++) ptr->exd[i] = 0; break; + case HIL_CMD_RNM: for (i = 0; i < idx; i++) ptr->rnm[i] = ptr->data[i] & HIL_PKT_DATA_MASK; for (; i < HIL_PTR_MAX_LENGTH + 1; i++) - ptr->rnm[i] = '\0'; + ptr->rnm[i] = 0; break; + default: /* These occur when device isn't present */ - if (p == (HIL_ERR_INT | HIL_PKT_CMD)) break; + if (p == (HIL_ERR_INT | HIL_PKT_CMD)) + break; /* Anything else we'd like to know about. */ printk(KERN_WARNING PREFIX "Device sent unknown record %x\n", p); break; @@ -130,7 +137,8 @@ static void hil_ptr_process_record(struct hil_ptr *ptr) report: if ((p & HIL_CMDCT_POL) != idx - 1) { - printk(KERN_WARNING PREFIX "Malformed poll packet %x (idx = %i)\n", p, idx); + printk(KERN_WARNING PREFIX + "Malformed poll packet %x (idx = %i)\n", p, idx); goto out; } @@ -139,7 +147,7 @@ static void hil_ptr_process_record(struct hil_ptr *ptr) laxis += i; ax16 = ptr->idd[1] & HIL_IDD_HEADER_16BIT; /* 8 or 16bit resolution */ - absdev = ptr->idd[1] & HIL_IDD_HEADER_ABS; + absdev = ptr->idd[1] & HIL_IDD_HEADER_ABS; for (cnt = 1; i < laxis; i++) { unsigned int lo,hi,val; @@ -157,7 +165,8 @@ static void hil_ptr_process_record(struct hil_ptr *ptr) input_report_abs(dev, ABS_X + i, val); } else { val = (int) (((int8_t)lo) | ((int8_t)hi<<8)); - if (i%3) val *= -1; + if (i%3) + val *= -1; input_report_rel(dev, REL_X + i, val); } } @@ -168,10 +177,11 @@ static void hil_ptr_process_record(struct hil_ptr *ptr) btn = ptr->data[cnt++]; up = btn & 1; btn &= 0xfe; - if (btn == 0x8e) { + if (btn == 0x8e) continue; /* TODO: proximity == touch? 
*/ - } - else if ((btn > 0x8c) || (btn < 0x80)) continue; + else + if ((btn > 0x8c) || (btn < 0x80)) + continue; btn = (btn - 0x80) >> 1; btn = ptr->btnmap[btn]; input_report_key(dev, btn, !up); @@ -182,14 +192,14 @@ static void hil_ptr_process_record(struct hil_ptr *ptr) up(&ptr->sem); } -static void hil_ptr_process_err(struct hil_ptr *ptr) { +static void hil_ptr_process_err(struct hil_ptr *ptr) +{ printk(KERN_WARNING PREFIX "errored HIL packet\n"); ptr->idx4 = 0; up(&ptr->sem); - return; } -static irqreturn_t hil_ptr_interrupt(struct serio *serio, +static irqreturn_t hil_ptr_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct hil_ptr *ptr; @@ -197,29 +207,29 @@ static irqreturn_t hil_ptr_interrupt(struct serio *serio, int idx; ptr = serio_get_drvdata(serio); - if (ptr == NULL) { - BUG(); - return IRQ_HANDLED; - } + BUG_ON(ptr == NULL); if (ptr->idx4 >= (HIL_PTR_MAX_LENGTH * sizeof(hil_packet))) { hil_ptr_process_err(ptr); return IRQ_HANDLED; } idx = ptr->idx4/4; - if (!(ptr->idx4 % 4)) ptr->data[idx] = 0; + if (!(ptr->idx4 % 4)) + ptr->data[idx] = 0; packet = ptr->data[idx]; packet |= ((hil_packet)data) << ((3 - (ptr->idx4 % 4)) * 8); ptr->data[idx] = packet; /* Records of N 4-byte hil_packets must terminate with a command. */ - if ((++(ptr->idx4)) % 4) return IRQ_HANDLED; + if ((++(ptr->idx4)) % 4) + return IRQ_HANDLED; if ((packet & 0xffff0000) != HIL_ERR_INT) { hil_ptr_process_err(ptr); return IRQ_HANDLED; } - if (packet & HIL_PKT_CMD) + if (packet & HIL_PKT_CMD) hil_ptr_process_record(ptr); + return IRQ_HANDLED; } @@ -228,10 +238,7 @@ static void hil_ptr_disconnect(struct serio *serio) struct hil_ptr *ptr; ptr = serio_get_drvdata(serio); - if (ptr == NULL) { - BUG(); - return; - } + BUG_ON(ptr == NULL); serio_close(serio); input_unregister_device(ptr->dev); @@ -241,7 +248,7 @@ static void hil_ptr_disconnect(struct serio *serio) static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver) { struct hil_ptr *ptr; - char *txt; + const char *txt; unsigned int i, naxsets, btntype; uint8_t did, *idd; @@ -260,34 +267,34 @@ static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver) serio_set_drvdata(serio, ptr); ptr->serio = serio; - init_MUTEX_LOCKED(&(ptr->sem)); + init_MUTEX_LOCKED(&ptr->sem); /* Get device info. MLC driver supplies devid/status/etc. 
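Each down() below blocks until the interrupt handler has parsed the reply to the preceding IDD/RSC/RNM/EXD request and released ptr->sem.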
*/ serio->write(serio, 0); serio->write(serio, 0); serio->write(serio, HIL_PKT_CMD >> 8); serio->write(serio, HIL_CMD_IDD); - down(&(ptr->sem)); + down(&ptr->sem); serio->write(serio, 0); serio->write(serio, 0); serio->write(serio, HIL_PKT_CMD >> 8); serio->write(serio, HIL_CMD_RSC); - down(&(ptr->sem)); + down(&ptr->sem); serio->write(serio, 0); serio->write(serio, 0); serio->write(serio, HIL_PKT_CMD >> 8); serio->write(serio, HIL_CMD_RNM); - down(&(ptr->sem)); + down(&ptr->sem); serio->write(serio, 0); serio->write(serio, 0); serio->write(serio, HIL_PKT_CMD >> 8); serio->write(serio, HIL_CMD_EXD); - down(&(ptr->sem)); + down(&ptr->sem); - up(&(ptr->sem)); + up(&ptr->sem); did = ptr->idd[0]; idd = ptr->idd + 1; @@ -301,12 +308,12 @@ static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver) ptr->dev->evbit[0] = BIT(EV_ABS); txt = "absolute"; } - if (!ptr->dev->evbit[0]) { + if (!ptr->dev->evbit[0]) goto bail2; - } ptr->nbtn = HIL_IDD_NUM_BUTTONS(idd); - if (ptr->nbtn) ptr->dev->evbit[0] |= BIT(EV_KEY); + if (ptr->nbtn) + ptr->dev->evbit[0] |= BIT(EV_KEY); naxsets = HIL_IDD_NUM_AXSETS(*idd); ptr->naxes = HIL_IDD_NUM_AXES_PER_SET(*idd); @@ -315,7 +322,7 @@ static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver) did, txt); printk(KERN_INFO PREFIX "HIL pointer has %i buttons and %i sets of %i axes\n", ptr->nbtn, naxsets, ptr->naxes); - + btntype = BTN_MISC; if ((did & HIL_IDD_DID_ABS_TABLET_MASK) == HIL_IDD_DID_ABS_TABLET) #ifdef TABLET_SIMULATES_MOUSE @@ -325,7 +332,7 @@ static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver) #endif if ((did & HIL_IDD_DID_ABS_TSCREEN_MASK) == HIL_IDD_DID_ABS_TSCREEN) btntype = BTN_TOUCH; - + if ((did & HIL_IDD_DID_REL_MOUSE_MASK) == HIL_IDD_DID_REL_MOUSE) btntype = BTN_MOUSE; @@ -341,12 +348,10 @@ static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver) } if ((did & HIL_IDD_DID_TYPE_MASK) == HIL_IDD_DID_TYPE_REL) { - for (i = 0; i < ptr->naxes; i++) { + for (i = 0; i < ptr->naxes; i++) set_bit(REL_X + i, ptr->dev->relbit); - } - for (i = 3; (i < ptr->naxes + 3) && (naxsets > 1); i++) { + for (i = 3; (i < ptr->naxes + 3) && (naxsets > 1); i++) set_bit(REL_X + i, ptr->dev->relbit); - } } else { for (i = 0; i < ptr->naxes; i++) { set_bit(ABS_X + i, ptr->dev->absbit); @@ -419,11 +424,11 @@ static int __init hil_ptr_init(void) { return serio_register_driver(&hil_ptr_serio_driver); } - + static void __exit hil_ptr_exit(void) { serio_unregister_driver(&hil_ptr_serio_driver); } - + module_init(hil_ptr_init); module_exit(hil_ptr_exit); diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c index 0710704..485b074 100644 --- a/drivers/input/serio/hil_mlc.c +++ b/drivers/input/serio/hil_mlc.c @@ -32,11 +32,11 @@ * * Driver theory of operation: * - * Some access methods and an ISR is defined by the sub-driver - * (e.g. hp_sdc_mlc.c). These methods are expected to provide a - * few bits of logic in addition to raw access to the HIL MLC, - * specifically, the ISR, which is entirely registered by the - * sub-driver and invoked directly, must check for record + * Some access methods and an ISR is defined by the sub-driver + * (e.g. hp_sdc_mlc.c). 
These methods are expected to provide a + * few bits of logic in addition to raw access to the HIL MLC, + * specifically, the ISR, which is entirely registered by the + * sub-driver and invoked directly, must check for record * termination or packet match, at which point a semaphore must * be cleared and then the hil_mlcs_tasklet must be scheduled. * @@ -47,7 +47,7 @@ * itself if output is pending. (This rescheduling should be replaced * at some point with a sub-driver-specific mechanism.) * - * A timer task prods the tasklet once per second to prevent + * A timer task prods the tasklet once per second to prevent * hangups when attached devices do not return expected data * and to initiate probes of the loop for new devices. */ @@ -83,69 +83,85 @@ DECLARE_TASKLET_DISABLED(hil_mlcs_tasklet, hil_mlcs_process, 0); /********************** Device info/instance management **********************/ -static void hil_mlc_clear_di_map (hil_mlc *mlc, int val) { +static void hil_mlc_clear_di_map(hil_mlc *mlc, int val) +{ int j; - for (j = val; j < 7 ; j++) { + + for (j = val; j < 7 ; j++) mlc->di_map[j] = -1; - } } -static void hil_mlc_clear_di_scratch (hil_mlc *mlc) { - memset(&(mlc->di_scratch), 0, sizeof(mlc->di_scratch)); +static void hil_mlc_clear_di_scratch(hil_mlc *mlc) +{ + memset(&mlc->di_scratch, 0, sizeof(mlc->di_scratch)); } -static void hil_mlc_copy_di_scratch (hil_mlc *mlc, int idx) { - memcpy(&(mlc->di[idx]), &(mlc->di_scratch), sizeof(mlc->di_scratch)); +static void hil_mlc_copy_di_scratch(hil_mlc *mlc, int idx) +{ + memcpy(&mlc->di[idx], &mlc->di_scratch, sizeof(mlc->di_scratch)); } -static int hil_mlc_match_di_scratch (hil_mlc *mlc) { +static int hil_mlc_match_di_scratch(hil_mlc *mlc) +{ int idx; for (idx = 0; idx < HIL_MLC_DEVMEM; idx++) { - int j, found; + int j, found = 0; /* In-use slots are not eligible. */ - found = 0; - for (j = 0; j < 7 ; j++) { - if (mlc->di_map[j] == idx) found++; - } - if (found) continue; - if (!memcmp(mlc->di + idx, - &(mlc->di_scratch), - sizeof(mlc->di_scratch))) break; + for (j = 0; j < 7 ; j++) + if (mlc->di_map[j] == idx) + found++; + + if (found) + continue; + + if (!memcmp(mlc->di + idx, &mlc->di_scratch, + sizeof(mlc->di_scratch))) + break; } - return((idx >= HIL_MLC_DEVMEM) ? -1 : idx); + return idx >= HIL_MLC_DEVMEM ? -1 : idx; } -static int hil_mlc_find_free_di(hil_mlc *mlc) { +static int hil_mlc_find_free_di(hil_mlc *mlc) +{ int idx; - /* TODO: Pick all-zero slots first, failing that, - * randomize the slot picked among those eligible. + + /* TODO: Pick all-zero slots first, failing that, + * randomize the slot picked among those eligible. 
*/ for (idx = 0; idx < HIL_MLC_DEVMEM; idx++) { - int j, found; - found = 0; - for (j = 0; j < 7 ; j++) { - if (mlc->di_map[j] == idx) found++; - } - if (!found) break; + int j, found = 0; + + for (j = 0; j < 7 ; j++) + if (mlc->di_map[j] == idx) + found++; + + if (!found) + break; } - return(idx); /* Note: It is guaranteed at least one above will match */ + + return idx; /* Note: It is guaranteed at least one above will match */ } -static inline void hil_mlc_clean_serio_map(hil_mlc *mlc) { +static inline void hil_mlc_clean_serio_map(hil_mlc *mlc) +{ int idx; + for (idx = 0; idx < HIL_MLC_DEVMEM; idx++) { - int j, found; - found = 0; - for (j = 0; j < 7 ; j++) { - if (mlc->di_map[j] == idx) found++; - } - if (!found) mlc->serio_map[idx].di_revmap = -1; + int j, found = 0; + + for (j = 0; j < 7 ; j++) + if (mlc->di_map[j] == idx) + found++; + + if (!found) + mlc->serio_map[idx].di_revmap = -1; } } -static void hil_mlc_send_polls(hil_mlc *mlc) { +static void hil_mlc_send_polls(hil_mlc *mlc) +{ int did, i, cnt; struct serio *serio; struct serio_driver *drv; @@ -157,26 +173,31 @@ static void hil_mlc_send_polls(hil_mlc *mlc) { while (mlc->icount < 15 - i) { hil_packet p; + p = mlc->ipacket[i]; if (did != (p & HIL_PKT_ADDR_MASK) >> 8) { - if (drv == NULL || drv->interrupt == NULL) goto skip; + if (drv && drv->interrupt) { + drv->interrupt(serio, 0, 0); + drv->interrupt(serio, HIL_ERR_INT >> 16, 0); + drv->interrupt(serio, HIL_PKT_CMD >> 8, 0); + drv->interrupt(serio, HIL_CMD_POL + cnt, 0); + } - drv->interrupt(serio, 0, 0); - drv->interrupt(serio, HIL_ERR_INT >> 16, 0); - drv->interrupt(serio, HIL_PKT_CMD >> 8, 0); - drv->interrupt(serio, HIL_CMD_POL + cnt, 0); - skip: did = (p & HIL_PKT_ADDR_MASK) >> 8; serio = did ? mlc->serio[mlc->di_map[did-1]] : NULL; drv = (serio != NULL) ? serio->drv : NULL; cnt = 0; } - cnt++; i++; - if (drv == NULL || drv->interrupt == NULL) continue; - drv->interrupt(serio, (p >> 24), 0); - drv->interrupt(serio, (p >> 16) & 0xff, 0); - drv->interrupt(serio, (p >> 8) & ~HIL_PKT_ADDR_MASK, 0); - drv->interrupt(serio, p & 0xff, 0); + + cnt++; + i++; + + if (drv && drv->interrupt) { + drv->interrupt(serio, (p >> 24), 0); + drv->interrupt(serio, (p >> 16) & 0xff, 0); + drv->interrupt(serio, (p >> 8) & ~HIL_PKT_ADDR_MASK, 0); + drv->interrupt(serio, p & 0xff, 0); + } } } @@ -215,12 +236,16 @@ static void hil_mlc_send_polls(hil_mlc *mlc) { #define HILSEN_DOZE (HILSEN_SAME | HILSEN_SCHED | HILSEN_BREAK) #define HILSEN_SLEEP (HILSEN_SAME | HILSEN_BREAK) -static int hilse_match(hil_mlc *mlc, int unused) { +static int hilse_match(hil_mlc *mlc, int unused) +{ int rc; + rc = hil_mlc_match_di_scratch(mlc); if (rc == -1) { rc = hil_mlc_find_free_di(mlc); - if (rc == -1) goto err; + if (rc == -1) + goto err; + #ifdef HIL_MLC_DEBUG printk(KERN_DEBUG PREFIX "new in slot %i\n", rc); #endif @@ -231,6 +256,7 @@ static int hilse_match(hil_mlc *mlc, int unused) { serio_rescan(mlc->serio[rc]); return -1; } + mlc->di_map[mlc->ddi] = rc; #ifdef HIL_MLC_DEBUG printk(KERN_DEBUG PREFIX "same in slot %i\n", rc); @@ -238,152 +264,177 @@ static int hilse_match(hil_mlc *mlc, int unused) { mlc->serio_map[rc].di_revmap = mlc->ddi; hil_mlc_clean_serio_map(mlc); return 0; + err: printk(KERN_ERR PREFIX "Residual device slots exhausted, close some serios!\n"); return 1; } /* An LCV used to prevent runaway loops, forces 5 second sleep when reset. 
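hilse_init_lcv() below refuses to proceed (returns -1, forcing the sleep) when the counter is non-zero and fewer than five seconds have passed since it was last cleared.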
*/ -static int hilse_init_lcv(hil_mlc *mlc, int unused) { +static int hilse_init_lcv(hil_mlc *mlc, int unused) +{ struct timeval tv; do_gettimeofday(&tv); - if(mlc->lcv == 0) goto restart; /* First init, no need to dally */ - if(tv.tv_sec - mlc->lcv_tv.tv_sec < 5) return -1; - restart: + if (mlc->lcv && (tv.tv_sec - mlc->lcv_tv.tv_sec) < 5) + return -1; + mlc->lcv_tv = tv; mlc->lcv = 0; + return 0; } -static int hilse_inc_lcv(hil_mlc *mlc, int lim) { - if (mlc->lcv++ >= lim) return -1; - return 0; +static int hilse_inc_lcv(hil_mlc *mlc, int lim) +{ + return mlc->lcv++ >= lim ? -1 : 0; } #if 0 -static int hilse_set_lcv(hil_mlc *mlc, int val) { +static int hilse_set_lcv(hil_mlc *mlc, int val) +{ mlc->lcv = val; + return 0; } #endif /* Management of the discovered device index (zero based, -1 means no devs) */ -static int hilse_set_ddi(hil_mlc *mlc, int val) { +static int hilse_set_ddi(hil_mlc *mlc, int val) +{ mlc->ddi = val; hil_mlc_clear_di_map(mlc, val + 1); + return 0; } -static int hilse_dec_ddi(hil_mlc *mlc, int unused) { +static int hilse_dec_ddi(hil_mlc *mlc, int unused) +{ mlc->ddi--; - if (mlc->ddi <= -1) { + if (mlc->ddi <= -1) { mlc->ddi = -1; hil_mlc_clear_di_map(mlc, 0); return -1; } hil_mlc_clear_di_map(mlc, mlc->ddi + 1); + return 0; } -static int hilse_inc_ddi(hil_mlc *mlc, int unused) { - if (mlc->ddi >= 6) { - BUG(); - return -1; - } +static int hilse_inc_ddi(hil_mlc *mlc, int unused) +{ + BUG_ON(mlc->ddi >= 6); mlc->ddi++; + return 0; } -static int hilse_take_idd(hil_mlc *mlc, int unused) { +static int hilse_take_idd(hil_mlc *mlc, int unused) +{ int i; - /* Help the state engine: - * Is this a real IDD response or just an echo? + /* Help the state engine: + * Is this a real IDD response or just an echo? * - * Real IDD response does not start with a command. + * Real IDD response does not start with a command. */ - if (mlc->ipacket[0] & HIL_PKT_CMD) goto bail; + if (mlc->ipacket[0] & HIL_PKT_CMD) + goto bail; + /* Should have the command echoed further down. */ for (i = 1; i < 16; i++) { - if (((mlc->ipacket[i] & HIL_PKT_ADDR_MASK) == + if (((mlc->ipacket[i] & HIL_PKT_ADDR_MASK) == (mlc->ipacket[0] & HIL_PKT_ADDR_MASK)) && - (mlc->ipacket[i] & HIL_PKT_CMD) && + (mlc->ipacket[i] & HIL_PKT_CMD) && ((mlc->ipacket[i] & HIL_PKT_DATA_MASK) == HIL_CMD_IDD)) break; } - if (i > 15) goto bail; + if (i > 15) + goto bail; + /* And the rest of the packets should still be clear. 
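A non-zero packet after the echoed command means this was not a genuine IDD reply, so bail out, which drops the state engine back to ACF.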
*/ - while (++i < 16) { - if (mlc->ipacket[i]) break; - } - if (i < 16) goto bail; - for (i = 0; i < 16; i++) { - mlc->di_scratch.idd[i] = + while (++i < 16) + if (mlc->ipacket[i]) + break; + + if (i < 16) + goto bail; + + for (i = 0; i < 16; i++) + mlc->di_scratch.idd[i] = mlc->ipacket[i] & HIL_PKT_DATA_MASK; - } + /* Next step is to see if RSC supported */ - if (mlc->di_scratch.idd[1] & HIL_IDD_HEADER_RSC) + if (mlc->di_scratch.idd[1] & HIL_IDD_HEADER_RSC) return HILSEN_NEXT; - if (mlc->di_scratch.idd[1] & HIL_IDD_HEADER_EXD) + + if (mlc->di_scratch.idd[1] & HIL_IDD_HEADER_EXD) return HILSEN_DOWN | 4; + return 0; + bail: mlc->ddi--; + return -1; /* This should send us off to ACF */ } -static int hilse_take_rsc(hil_mlc *mlc, int unused) { +static int hilse_take_rsc(hil_mlc *mlc, int unused) +{ int i; - for (i = 0; i < 16; i++) { - mlc->di_scratch.rsc[i] = + for (i = 0; i < 16; i++) + mlc->di_scratch.rsc[i] = mlc->ipacket[i] & HIL_PKT_DATA_MASK; - } + /* Next step is to see if EXD supported (IDD has already been read) */ - if (mlc->di_scratch.idd[1] & HIL_IDD_HEADER_EXD) + if (mlc->di_scratch.idd[1] & HIL_IDD_HEADER_EXD) return HILSEN_NEXT; + return 0; } -static int hilse_take_exd(hil_mlc *mlc, int unused) { +static int hilse_take_exd(hil_mlc *mlc, int unused) +{ int i; - for (i = 0; i < 16; i++) { - mlc->di_scratch.exd[i] = + for (i = 0; i < 16; i++) + mlc->di_scratch.exd[i] = mlc->ipacket[i] & HIL_PKT_DATA_MASK; - } + /* Next step is to see if RNM supported. */ - if (mlc->di_scratch.exd[0] & HIL_EXD_HEADER_RNM) + if (mlc->di_scratch.exd[0] & HIL_EXD_HEADER_RNM) return HILSEN_NEXT; + return 0; } -static int hilse_take_rnm(hil_mlc *mlc, int unused) { +static int hilse_take_rnm(hil_mlc *mlc, int unused) +{ int i; - for (i = 0; i < 16; i++) { - mlc->di_scratch.rnm[i] = + for (i = 0; i < 16; i++) + mlc->di_scratch.rnm[i] = mlc->ipacket[i] & HIL_PKT_DATA_MASK; - } - do { - char nam[17]; - snprintf(nam, 16, "%s", mlc->di_scratch.rnm); - nam[16] = '\0'; - printk(KERN_INFO PREFIX "Device name gotten: %s\n", nam); - } while (0); + + printk(KERN_INFO PREFIX "Device name gotten: %16s\n", + mlc->di_scratch.rnm); + return 0; } -static int hilse_operate(hil_mlc *mlc, int repoll) { +static int hilse_operate(hil_mlc *mlc, int repoll) +{ - if (mlc->opercnt == 0) hil_mlcs_probe = 0; + if (mlc->opercnt == 0) + hil_mlcs_probe = 0; mlc->opercnt = 1; hil_mlc_send_polls(mlc); - if (!hil_mlcs_probe) return 0; + if (!hil_mlcs_probe) + return 0; hil_mlcs_probe = 0; mlc->opercnt = 0; return 1; @@ -428,7 +479,7 @@ const struct hilse_node hil_mlc_se[HILSEN_END] = { EXPECT(HIL_ERR_INT | TEST_PACKET(0xa), 2000, HILSEN_NEXT, HILSEN_RESTART, HILSEN_RESTART) OUT(HIL_CTRL_ONLY | 0) /* Disable test mode */ - + /* 9 HILSEN_DHR */ FUNC(hilse_init_lcv, 0, HILSEN_NEXT, HILSEN_SLEEP, 0) @@ -439,7 +490,7 @@ const struct hilse_node hil_mlc_se[HILSEN_END] = { IN(300000, HILSEN_DHR2, HILSEN_DHR2, HILSEN_NEXT) /* 14 HILSEN_IFC */ - OUT(HIL_PKT_CMD | HIL_CMD_IFC) + OUT(HIL_PKT_CMD | HIL_CMD_IFC) EXPECT(HIL_PKT_CMD | HIL_CMD_IFC | HIL_ERR_INT, 20000, HILSEN_DISC, HILSEN_DHR2, HILSEN_NEXT ) @@ -455,7 +506,7 @@ const struct hilse_node hil_mlc_se[HILSEN_END] = { /* 18 HILSEN_HEAL */ OUT_LAST(HIL_CMD_ELB) - EXPECT_LAST(HIL_CMD_ELB | HIL_ERR_INT, + EXPECT_LAST(HIL_CMD_ELB | HIL_ERR_INT, 20000, HILSEN_REPOLL, HILSEN_DSR, HILSEN_NEXT) FUNC(hilse_dec_ddi, 0, HILSEN_HEAL, HILSEN_NEXT, 0) @@ -503,7 +554,7 @@ const struct hilse_node hil_mlc_se[HILSEN_END] = { /* 44 HILSEN_PROBE */ OUT_LAST(HIL_PKT_CMD | HIL_CMD_EPT) - IN(10000, HILSEN_DISC, 
HILSEN_DSR, HILSEN_NEXT) + IN(10000, HILSEN_DISC, HILSEN_DSR, HILSEN_NEXT) OUT_DISC(HIL_PKT_CMD | HIL_CMD_ELB) IN(10000, HILSEN_DISC, HILSEN_DSR, HILSEN_NEXT) OUT(HIL_PKT_CMD | HIL_CMD_ACF | 1) @@ -514,7 +565,7 @@ const struct hilse_node hil_mlc_se[HILSEN_END] = { /* 52 HILSEN_DSR */ FUNC(hilse_set_ddi, -1, HILSEN_NEXT, 0, 0) OUT(HIL_PKT_CMD | HIL_CMD_DSR) - IN(20000, HILSEN_DHR, HILSEN_DHR, HILSEN_IFC) + IN(20000, HILSEN_DHR, HILSEN_DHR, HILSEN_IFC) /* 55 HILSEN_REPOLL */ OUT(HIL_PKT_CMD | HIL_CMD_RPL) @@ -523,14 +574,15 @@ const struct hilse_node hil_mlc_se[HILSEN_END] = { FUNC(hilse_operate, 1, HILSEN_OPERATE, HILSEN_IFC, HILSEN_PROBE) /* 58 HILSEN_IFCACF */ - OUT(HIL_PKT_CMD | HIL_CMD_IFC) + OUT(HIL_PKT_CMD | HIL_CMD_IFC) EXPECT(HIL_PKT_CMD | HIL_CMD_IFC | HIL_ERR_INT, 20000, HILSEN_ACF2, HILSEN_DHR2, HILSEN_HEAL) /* 60 HILSEN_END */ }; -static inline void hilse_setup_input(hil_mlc *mlc, const struct hilse_node *node) { +static inline void hilse_setup_input(hil_mlc *mlc, const struct hilse_node *node) +{ switch (node->act) { case HILSE_EXPECT_DISC: @@ -555,25 +607,25 @@ static inline void hilse_setup_input(hil_mlc *mlc, const struct hilse_node *node do_gettimeofday(&(mlc->instart)); mlc->icount = 15; memset(mlc->ipacket, 0, 16 * sizeof(hil_packet)); - BUG_ON(down_trylock(&(mlc->isem))); - - return; + BUG_ON(down_trylock(&mlc->isem)); } #ifdef HIL_MLC_DEBUG -static int doze = 0; +static int doze; static int seidx; /* For debug */ #endif -static int hilse_donode (hil_mlc *mlc) { +static int hilse_donode(hil_mlc *mlc) +{ const struct hilse_node *node; int nextidx = 0; int sched_long = 0; unsigned long flags; #ifdef HIL_MLC_DEBUG - if (mlc->seidx && (mlc->seidx != seidx) && mlc->seidx != 41 && mlc->seidx != 42 && mlc->seidx != 43) { - printk(KERN_DEBUG PREFIX "z%i \n {%i}", doze, mlc->seidx); + if (mlc->seidx && mlc->seidx != seidx && + mlc->seidx != 41 && mlc->seidx != 42 && mlc->seidx != 43) { + printk(KERN_DEBUG PREFIX "z%i \n {%i}", doze, mlc->seidx); doze = 0; } @@ -588,50 +640,59 @@ static int hilse_donode (hil_mlc *mlc) { case HILSE_FUNC: BUG_ON(node->object.func == NULL); rc = node->object.func(mlc, node->arg); - nextidx = (rc > 0) ? node->ugly : + nextidx = (rc > 0) ? node->ugly : ((rc < 0) ? 
node->bad : node->good); - if (nextidx == HILSEN_FOLLOW) nextidx = rc; + if (nextidx == HILSEN_FOLLOW) + nextidx = rc; break; + case HILSE_EXPECT_LAST: case HILSE_EXPECT_DISC: case HILSE_EXPECT: case HILSE_IN: /* Already set up from previous HILSE_OUT_* */ - write_lock_irqsave(&(mlc->lock), flags); + write_lock_irqsave(&mlc->lock, flags); rc = mlc->in(mlc, node->arg); if (rc == 2) { nextidx = HILSEN_DOZE; sched_long = 1; - write_unlock_irqrestore(&(mlc->lock), flags); + write_unlock_irqrestore(&mlc->lock, flags); break; } - if (rc == 1) nextidx = node->ugly; - else if (rc == 0) nextidx = node->good; - else nextidx = node->bad; + if (rc == 1) + nextidx = node->ugly; + else if (rc == 0) + nextidx = node->good; + else + nextidx = node->bad; mlc->istarted = 0; - write_unlock_irqrestore(&(mlc->lock), flags); + write_unlock_irqrestore(&mlc->lock, flags); break; + case HILSE_OUT_LAST: - write_lock_irqsave(&(mlc->lock), flags); + write_lock_irqsave(&mlc->lock, flags); pack = node->object.packet; pack |= ((mlc->ddi + 1) << HIL_PKT_ADDR_SHIFT); goto out; + case HILSE_OUT_DISC: - write_lock_irqsave(&(mlc->lock), flags); + write_lock_irqsave(&mlc->lock, flags); pack = node->object.packet; pack |= ((mlc->ddi + 2) << HIL_PKT_ADDR_SHIFT); goto out; + case HILSE_OUT: - write_lock_irqsave(&(mlc->lock), flags); + write_lock_irqsave(&mlc->lock, flags); pack = node->object.packet; out: - if (mlc->istarted) goto out2; + if (mlc->istarted) + goto out2; /* Prepare to receive input */ if ((node + 1)->act & HILSE_IN) hilse_setup_input(mlc, node + 1); out2: - write_unlock_irqrestore(&(mlc->lock), flags); + write_unlock_irqrestore(&mlc->lock, flags); if (down_trylock(&mlc->osem)) { nextidx = HILSEN_DOZE; @@ -639,37 +700,39 @@ static int hilse_donode (hil_mlc *mlc) { } up(&mlc->osem); - write_lock_irqsave(&(mlc->lock), flags); - if (!(mlc->ostarted)) { + write_lock_irqsave(&mlc->lock, flags); + if (!mlc->ostarted) { mlc->ostarted = 1; mlc->opacket = pack; mlc->out(mlc); nextidx = HILSEN_DOZE; - write_unlock_irqrestore(&(mlc->lock), flags); + write_unlock_irqrestore(&mlc->lock, flags); break; } mlc->ostarted = 0; do_gettimeofday(&(mlc->instart)); - write_unlock_irqrestore(&(mlc->lock), flags); + write_unlock_irqrestore(&mlc->lock, flags); nextidx = HILSEN_NEXT; break; + case HILSE_CTS: nextidx = mlc->cts(mlc) ? 
node->bad : node->good; break; + default: BUG(); - nextidx = 0; - break; } #ifdef HIL_MLC_DEBUG - if (nextidx == HILSEN_DOZE) doze++; + if (nextidx == HILSEN_DOZE) + doze++; #endif while (nextidx & HILSEN_SCHED) { struct timeval tv; - if (!sched_long) goto sched; + if (!sched_long) + goto sched; do_gettimeofday(&tv); tv.tv_usec += USEC_PER_SEC * (tv.tv_sec - mlc->instart.tv_sec); @@ -682,17 +745,24 @@ static int hilse_donode (hil_mlc *mlc) { sched: tasklet_schedule(&hil_mlcs_tasklet); break; - } - if (nextidx & HILSEN_DOWN) mlc->seidx += nextidx & HILSEN_MASK; - else if (nextidx & HILSEN_UP) mlc->seidx -= nextidx & HILSEN_MASK; - else mlc->seidx = nextidx & HILSEN_MASK; + } + + if (nextidx & HILSEN_DOWN) + mlc->seidx += nextidx & HILSEN_MASK; + else if (nextidx & HILSEN_UP) + mlc->seidx -= nextidx & HILSEN_MASK; + else + mlc->seidx = nextidx & HILSEN_MASK; + + if (nextidx & HILSEN_BREAK) + return 1; - if (nextidx & HILSEN_BREAK) return 1; return 0; } /******************** tasklet context functions **************************/ -static void hil_mlcs_process(unsigned long unused) { +static void hil_mlcs_process(unsigned long unused) +{ struct list_head *tmp; read_lock(&hil_mlcs_lock); @@ -700,19 +770,20 @@ static void hil_mlcs_process(unsigned long unused) { struct hil_mlc *mlc = list_entry(tmp, hil_mlc, list); while (hilse_donode(mlc) == 0) { #ifdef HIL_MLC_DEBUG - if (mlc->seidx != 41 && - mlc->seidx != 42 && - mlc->seidx != 43) - printk(KERN_DEBUG PREFIX " + "); + if (mlc->seidx != 41 && + mlc->seidx != 42 && + mlc->seidx != 43) + printk(KERN_DEBUG PREFIX " + "); #endif - }; + } } read_unlock(&hil_mlcs_lock); } /************************* Keepalive timer task *********************/ -void hil_mlcs_timer (unsigned long data) { +void hil_mlcs_timer(unsigned long data) +{ hil_mlcs_probe = 1; tasklet_schedule(&hil_mlcs_tasklet); /* Re-insert the periodic task. 
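The once-per-second kicker timer is re-armed just below, outside this hunk.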
*/ @@ -722,28 +793,25 @@ void hil_mlcs_timer (unsigned long data) { /******************** user/kernel context functions **********************/ -static int hil_mlc_serio_write(struct serio *serio, unsigned char c) { +static int hil_mlc_serio_write(struct serio *serio, unsigned char c) +{ struct hil_mlc_serio_map *map; struct hil_mlc *mlc; struct serio_driver *drv; uint8_t *idx, *last; map = serio->port_data; - if (map == NULL) { - BUG(); - return -EIO; - } + BUG_ON(map == NULL); + mlc = map->mlc; - if (mlc == NULL) { - BUG(); - return -EIO; - } - mlc->serio_opacket[map->didx] |= + BUG_ON(mlc == NULL); + + mlc->serio_opacket[map->didx] |= ((hil_packet)c) << (8 * (3 - mlc->serio_oidx[map->didx])); if (mlc->serio_oidx[map->didx] >= 3) { /* for now only commands */ - if (!(mlc->serio_opacket[map->didx] & HIL_PKT_CMD)) + if (!(mlc->serio_opacket[map->didx] & HIL_PKT_CMD)) return -EIO; switch (mlc->serio_opacket[map->didx] & HIL_PKT_DATA_MASK) { case HIL_CMD_IDD: @@ -769,12 +837,11 @@ static int hil_mlc_serio_write(struct serio *serio, unsigned char c) { return -EIO; emu: drv = serio->drv; - if (drv == NULL) { - BUG(); - return -EIO; - } + BUG_ON(drv == NULL); + last = idx + 15; - while ((last != idx) && (*last == 0)) last--; + while ((last != idx) && (*last == 0)) + last--; while (idx != last) { drv->interrupt(serio, 0, 0); @@ -787,14 +854,15 @@ static int hil_mlc_serio_write(struct serio *serio, unsigned char c) { drv->interrupt(serio, HIL_ERR_INT >> 16, 0); drv->interrupt(serio, HIL_PKT_CMD >> 8, 0); drv->interrupt(serio, *idx, 0); - + mlc->serio_oidx[map->didx] = 0; mlc->serio_opacket[map->didx] = 0; return 0; } -static int hil_mlc_serio_open(struct serio *serio) { +static int hil_mlc_serio_open(struct serio *serio) +{ struct hil_mlc_serio_map *map; struct hil_mlc *mlc; @@ -802,33 +870,24 @@ static int hil_mlc_serio_open(struct serio *serio) { return -EBUSY; map = serio->port_data; - if (map == NULL) { - BUG(); - return -ENODEV; - } + BUG_ON(map == NULL); + mlc = map->mlc; - if (mlc == NULL) { - BUG(); - return -ENODEV; - } + BUG_ON(mlc == NULL); return 0; } -static void hil_mlc_serio_close(struct serio *serio) { +static void hil_mlc_serio_close(struct serio *serio) +{ struct hil_mlc_serio_map *map; struct hil_mlc *mlc; map = serio->port_data; - if (map == NULL) { - BUG(); - return; - } + BUG_ON(map == NULL); + mlc = map->mlc; - if (mlc == NULL) { - BUG(); - return; - } + BUG_ON(mlc == NULL); serio_set_drvdata(serio, NULL); serio->drv = NULL; @@ -842,27 +901,26 @@ static const struct serio_device_id hil_mlc_serio_id = { .id = SERIO_ANY, }; -int hil_mlc_register(hil_mlc *mlc) { +int hil_mlc_register(hil_mlc *mlc) +{ int i; - unsigned long flags; + unsigned long flags; - if (mlc == NULL) { - return -EINVAL; - } + BUG_ON(mlc == NULL); mlc->istarted = 0; - mlc->ostarted = 0; + mlc->ostarted = 0; - rwlock_init(&mlc->lock); - init_MUTEX(&(mlc->osem)); + rwlock_init(&mlc->lock); + init_MUTEX(&mlc->osem); - init_MUTEX(&(mlc->isem)); - mlc->icount = -1; - mlc->imatch = 0; + init_MUTEX(&mlc->isem); + mlc->icount = -1; + mlc->imatch = 0; mlc->opercnt = 0; - init_MUTEX_LOCKED(&(mlc->csem)); + init_MUTEX_LOCKED(&(mlc->csem)); hil_mlc_clear_di_scratch(mlc); hil_mlc_clear_di_map(mlc, 0); @@ -897,19 +955,18 @@ int hil_mlc_register(hil_mlc *mlc) { return 0; } -int hil_mlc_unregister(hil_mlc *mlc) { +int hil_mlc_unregister(hil_mlc *mlc) +{ struct list_head *tmp; - unsigned long flags; + unsigned long flags; int i; - if (mlc == NULL) - return -EINVAL; + BUG_ON(mlc == NULL); write_lock_irqsave(&hil_mlcs_lock, 
flags); - list_for_each(tmp, &hil_mlcs) { + list_for_each(tmp, &hil_mlcs) if (list_entry(tmp, hil_mlc, list) == mlc) goto found; - } /* not found in list */ write_unlock_irqrestore(&hil_mlcs_lock, flags); @@ -918,7 +975,7 @@ int hil_mlc_unregister(hil_mlc *mlc) { found: list_del(tmp); - write_unlock_irqrestore(&hil_mlcs_lock, flags); + write_unlock_irqrestore(&hil_mlcs_lock, flags); for (i = 0; i < HIL_MLC_DEVMEM; i++) { serio_unregister_port(mlc->serio[i]); @@ -942,7 +999,7 @@ static int __init hil_mlc_init(void) return 0; } - + static void __exit hil_mlc_exit(void) { del_timer(&hil_mlcs_kicker); @@ -950,6 +1007,6 @@ static void __exit hil_mlc_exit(void) tasklet_disable(&hil_mlcs_tasklet); tasklet_kill(&hil_mlcs_tasklet); } - + module_init(hil_mlc_init); module_exit(hil_mlc_exit); diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c index 353a8a1..31826e6 100644 --- a/drivers/input/serio/hp_sdc.c +++ b/drivers/input/serio/hp_sdc.c @@ -34,27 +34,27 @@ * * Driver theory of operation: * - * hp_sdc_put does all writing to the SDC. ISR can run on a different - * CPU than hp_sdc_put, but only one CPU runs hp_sdc_put at a time + * hp_sdc_put does all writing to the SDC. ISR can run on a different + * CPU than hp_sdc_put, but only one CPU runs hp_sdc_put at a time * (it cannot really benefit from SMP anyway.) A tasket fit this perfectly. * - * All data coming back from the SDC is sent via interrupt and can be read - * fully in the ISR, so there are no latency/throughput problems there. - * The problem is with output, due to the slow clock speed of the SDC - * compared to the CPU. This should not be too horrible most of the time, - * but if used with HIL devices that support the multibyte transfer command, - * keeping outbound throughput flowing at the 6500KBps that the HIL is + * All data coming back from the SDC is sent via interrupt and can be read + * fully in the ISR, so there are no latency/throughput problems there. + * The problem is with output, due to the slow clock speed of the SDC + * compared to the CPU. This should not be too horrible most of the time, + * but if used with HIL devices that support the multibyte transfer command, + * keeping outbound throughput flowing at the 6500KBps that the HIL is * capable of is more than can be done at HZ=100. * - * Busy polling for IBF clear wastes CPU cycles and bus cycles. hp_sdc.ibf - * is set to 0 when the IBF flag in the status register has cleared. ISR - * may do this, and may also access the parts of queued transactions related - * to reading data back from the SDC, but otherwise will not touch the + * Busy polling for IBF clear wastes CPU cycles and bus cycles. hp_sdc.ibf + * is set to 0 when the IBF flag in the status register has cleared. ISR + * may do this, and may also access the parts of queued transactions related + * to reading data back from the SDC, but otherwise will not touch the * hp_sdc state. Whenever a register is written hp_sdc.ibf is set to 1. * * The i8042 write index and the values in the 4-byte input buffer * starting at 0x70 are kept track of in hp_sdc.wi, and .r7[], respectively, - * to minimize the amount of IO needed to the SDC. However these values + * to minimize the amount of IO needed to the SDC. However these values * do not need to be locked since they are only ever accessed by hp_sdc_put. * * A timer task schedules the tasklet once per second just to make @@ -106,33 +106,39 @@ EXPORT_SYMBOL(hp_sdc_dequeue_transaction); static hp_i8042_sdc hp_sdc; /* All driver state is kept in here. 
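Its fields are guarded by dedicated rwlocks (ibf_lock, rtq_lock, hook_lock, ...).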
*/ /*************** primitives for use in any context *********************/ -static inline uint8_t hp_sdc_status_in8 (void) { +static inline uint8_t hp_sdc_status_in8(void) +{ uint8_t status; unsigned long flags; write_lock_irqsave(&hp_sdc.ibf_lock, flags); status = sdc_readb(hp_sdc.status_io); - if (!(status & HP_SDC_STATUS_IBF)) hp_sdc.ibf = 0; + if (!(status & HP_SDC_STATUS_IBF)) + hp_sdc.ibf = 0; write_unlock_irqrestore(&hp_sdc.ibf_lock, flags); return status; } -static inline uint8_t hp_sdc_data_in8 (void) { - return sdc_readb(hp_sdc.data_io); +static inline uint8_t hp_sdc_data_in8(void) +{ + return sdc_readb(hp_sdc.data_io); } -static inline void hp_sdc_status_out8 (uint8_t val) { +static inline void hp_sdc_status_out8(uint8_t val) +{ unsigned long flags; write_lock_irqsave(&hp_sdc.ibf_lock, flags); hp_sdc.ibf = 1; - if ((val & 0xf0) == 0xe0) hp_sdc.wi = 0xff; + if ((val & 0xf0) == 0xe0) + hp_sdc.wi = 0xff; sdc_writeb(val, hp_sdc.status_io); write_unlock_irqrestore(&hp_sdc.ibf_lock, flags); } -static inline void hp_sdc_data_out8 (uint8_t val) { +static inline void hp_sdc_data_out8(uint8_t val) +{ unsigned long flags; write_lock_irqsave(&hp_sdc.ibf_lock, flags); @@ -141,11 +147,12 @@ static inline void hp_sdc_data_out8 (uint8_t val) { write_unlock_irqrestore(&hp_sdc.ibf_lock, flags); } -/* Care must be taken to only invoke hp_sdc_spin_ibf when - * absolutely needed, or in rarely invoked subroutines. - * Not only does it waste CPU cycles, it also wastes bus cycles. +/* Care must be taken to only invoke hp_sdc_spin_ibf when + * absolutely needed, or in rarely invoked subroutines. + * Not only does it waste CPU cycles, it also wastes bus cycles. */ -static inline void hp_sdc_spin_ibf(void) { +static inline void hp_sdc_spin_ibf(void) +{ unsigned long flags; rwlock_t *lock; @@ -158,19 +165,21 @@ static inline void hp_sdc_spin_ibf(void) { } read_unlock(lock); write_lock(lock); - while (sdc_readb(hp_sdc.status_io) & HP_SDC_STATUS_IBF) {}; + while (sdc_readb(hp_sdc.status_io) & HP_SDC_STATUS_IBF) + { } hp_sdc.ibf = 0; write_unlock_irqrestore(lock, flags); } /************************ Interrupt context functions ************************/ -static void hp_sdc_take (int irq, void *dev_id, uint8_t status, uint8_t data) { +static void hp_sdc_take(int irq, void *dev_id, uint8_t status, uint8_t data) +{ hp_sdc_transaction *curr; read_lock(&hp_sdc.rtq_lock); if (hp_sdc.rcurr < 0) { - read_unlock(&hp_sdc.rtq_lock); + read_unlock(&hp_sdc.rtq_lock); return; } curr = hp_sdc.tq[hp_sdc.rcurr]; @@ -183,25 +192,27 @@ static void hp_sdc_take (int irq, void *dev_id, uint8_t status, uint8_t data) { if (hp_sdc.rqty <= 0) { /* All data has been gathered. 
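Wake the requester via its semaphore or callback, advance the transaction, then release the read slot (rcurr) and schedule the tasklet.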
*/ - if(curr->seq[curr->actidx] & HP_SDC_ACT_SEMAPHORE) { - if (curr->act.semaphore) up(curr->act.semaphore); - } - if(curr->seq[curr->actidx] & HP_SDC_ACT_CALLBACK) { + if (curr->seq[curr->actidx] & HP_SDC_ACT_SEMAPHORE) + if (curr->act.semaphore) + up(curr->act.semaphore); + + if (curr->seq[curr->actidx] & HP_SDC_ACT_CALLBACK) if (curr->act.irqhook) curr->act.irqhook(irq, dev_id, status, data); - } + curr->actidx = curr->idx; curr->idx++; /* Return control of this transaction */ write_lock(&hp_sdc.rtq_lock); - hp_sdc.rcurr = -1; + hp_sdc.rcurr = -1; hp_sdc.rqty = 0; write_unlock(&hp_sdc.rtq_lock); tasklet_schedule(&hp_sdc.task); } } -static irqreturn_t hp_sdc_isr(int irq, void *dev_id) { +static irqreturn_t hp_sdc_isr(int irq, void *dev_id) +{ uint8_t status, data; status = hp_sdc_status_in8(); @@ -209,67 +220,74 @@ static irqreturn_t hp_sdc_isr(int irq, void *dev_id) { data = hp_sdc_data_in8(); /* For now we are ignoring these until we get the SDC to behave. */ - if (((status & 0xf1) == 0x51) && data == 0x82) { - return IRQ_HANDLED; - } + if (((status & 0xf1) == 0x51) && data == 0x82) + return IRQ_HANDLED; - switch(status & HP_SDC_STATUS_IRQMASK) { - case 0: /* This case is not documented. */ + switch (status & HP_SDC_STATUS_IRQMASK) { + case 0: /* This case is not documented. */ break; - case HP_SDC_STATUS_USERTIMER: - case HP_SDC_STATUS_PERIODIC: - case HP_SDC_STATUS_TIMER: + + case HP_SDC_STATUS_USERTIMER: + case HP_SDC_STATUS_PERIODIC: + case HP_SDC_STATUS_TIMER: read_lock(&hp_sdc.hook_lock); - if (hp_sdc.timer != NULL) + if (hp_sdc.timer != NULL) hp_sdc.timer(irq, dev_id, status, data); read_unlock(&hp_sdc.hook_lock); break; - case HP_SDC_STATUS_REG: + + case HP_SDC_STATUS_REG: hp_sdc_take(irq, dev_id, status, data); break; - case HP_SDC_STATUS_HILCMD: - case HP_SDC_STATUS_HILDATA: + + case HP_SDC_STATUS_HILCMD: + case HP_SDC_STATUS_HILDATA: read_lock(&hp_sdc.hook_lock); if (hp_sdc.hil != NULL) hp_sdc.hil(irq, dev_id, status, data); read_unlock(&hp_sdc.hook_lock); break; - case HP_SDC_STATUS_PUP: + + case HP_SDC_STATUS_PUP: read_lock(&hp_sdc.hook_lock); if (hp_sdc.pup != NULL) hp_sdc.pup(irq, dev_id, status, data); - else printk(KERN_INFO PREFIX "HP SDC reports successful PUP.\n"); + else + printk(KERN_INFO PREFIX "HP SDC reports successful PUP.\n"); read_unlock(&hp_sdc.hook_lock); break; - default: + + default: read_lock(&hp_sdc.hook_lock); if (hp_sdc.cooked != NULL) hp_sdc.cooked(irq, dev_id, status, data); read_unlock(&hp_sdc.hook_lock); break; } + return IRQ_HANDLED; } -static irqreturn_t hp_sdc_nmisr(int irq, void *dev_id) { +static irqreturn_t hp_sdc_nmisr(int irq, void *dev_id) +{ int status; - + status = hp_sdc_status_in8(); printk(KERN_WARNING PREFIX "NMI !\n"); -#if 0 +#if 0 if (status & HP_SDC_NMISTATUS_FHS) { read_lock(&hp_sdc.hook_lock); - if (hp_sdc.timer != NULL) + if (hp_sdc.timer != NULL) hp_sdc.timer(irq, dev_id, status, 0); read_unlock(&hp_sdc.hook_lock); - } - else { + } else { /* TODO: pass this on to the HIL handler, or do SAK here? 
*/ printk(KERN_WARNING PREFIX "HIL NMI\n"); } #endif + return IRQ_HANDLED; } @@ -278,13 +296,17 @@ static irqreturn_t hp_sdc_nmisr(int irq, void *dev_id) { unsigned long hp_sdc_put(void); -static void hp_sdc_tasklet(unsigned long foo) { - +static void hp_sdc_tasklet(unsigned long foo) +{ write_lock_irq(&hp_sdc.rtq_lock); + if (hp_sdc.rcurr >= 0) { struct timeval tv; + do_gettimeofday(&tv); - if (tv.tv_sec > hp_sdc.rtv.tv_sec) tv.tv_usec += 1000000; + if (tv.tv_sec > hp_sdc.rtv.tv_sec) + tv.tv_usec += USEC_PER_SEC; + if (tv.tv_usec - hp_sdc.rtv.tv_usec > HP_SDC_MAX_REG_DELAY) { hp_sdc_transaction *curr; uint8_t tmp; @@ -300,27 +322,29 @@ static void hp_sdc_tasklet(unsigned long foo) { hp_sdc.rqty = 0; tmp = curr->seq[curr->actidx]; curr->seq[curr->actidx] |= HP_SDC_ACT_DEAD; - if(tmp & HP_SDC_ACT_SEMAPHORE) { - if (curr->act.semaphore) + if (tmp & HP_SDC_ACT_SEMAPHORE) + if (curr->act.semaphore) up(curr->act.semaphore); - } - if(tmp & HP_SDC_ACT_CALLBACK) { + + if (tmp & HP_SDC_ACT_CALLBACK) { /* Note this means that irqhooks may be called * in tasklet/bh context. */ - if (curr->act.irqhook) + if (curr->act.irqhook) curr->act.irqhook(0, NULL, 0, 0); } + curr->actidx = curr->idx; curr->idx++; - hp_sdc.rcurr = -1; + hp_sdc.rcurr = -1; } } write_unlock_irq(&hp_sdc.rtq_lock); hp_sdc_put(); } -unsigned long hp_sdc_put(void) { +unsigned long hp_sdc_put(void) +{ hp_sdc_transaction *curr; uint8_t act; int idx, curridx; @@ -333,19 +357,24 @@ unsigned long hp_sdc_put(void) { requires output, so we skip to the administrativa. */ if (hp_sdc.ibf) { hp_sdc_status_in8(); - if (hp_sdc.ibf) goto finish; + if (hp_sdc.ibf) + goto finish; } anew: /* See if we are in the middle of a sequence. */ - if (hp_sdc.wcurr < 0) hp_sdc.wcurr = 0; + if (hp_sdc.wcurr < 0) + hp_sdc.wcurr = 0; read_lock_irq(&hp_sdc.rtq_lock); - if (hp_sdc.rcurr == hp_sdc.wcurr) hp_sdc.wcurr++; + if (hp_sdc.rcurr == hp_sdc.wcurr) + hp_sdc.wcurr++; read_unlock_irq(&hp_sdc.rtq_lock); - if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) hp_sdc.wcurr = 0; + if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) + hp_sdc.wcurr = 0; curridx = hp_sdc.wcurr; - if (hp_sdc.tq[curridx] != NULL) goto start; + if (hp_sdc.tq[curridx] != NULL) + goto start; while (++curridx != hp_sdc.wcurr) { if (curridx >= HP_SDC_QUEUE_LEN) { @@ -358,7 +387,8 @@ unsigned long hp_sdc_put(void) { continue; } read_unlock_irq(&hp_sdc.rtq_lock); - if (hp_sdc.tq[curridx] != NULL) break; /* Found one. */ + if (hp_sdc.tq[curridx] != NULL) + break; /* Found one. */ } if (curridx == hp_sdc.wcurr) { /* There's nothing queued to do. */ curridx = -1; @@ -374,7 +404,8 @@ unsigned long hp_sdc_put(void) { goto finish; } - if (hp_sdc.wcurr == -1) goto done; + if (hp_sdc.wcurr == -1) + goto done; curr = hp_sdc.tq[curridx]; idx = curr->actidx; @@ -383,20 +414,23 @@ unsigned long hp_sdc_put(void) { hp_sdc.tq[curridx] = NULL; /* Interleave outbound data between the transactions. */ hp_sdc.wcurr++; - if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) hp_sdc.wcurr = 0; - goto finish; + if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) + hp_sdc.wcurr = 0; + goto finish; } act = curr->seq[idx]; idx++; if (curr->idx >= curr->endidx) { - if (act & HP_SDC_ACT_DEALLOC) kfree(curr); + if (act & HP_SDC_ACT_DEALLOC) + kfree(curr); hp_sdc.tq[curridx] = NULL; /* Interleave outbound data between the transactions. 
*/ hp_sdc.wcurr++; - if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) hp_sdc.wcurr = 0; - goto finish; + if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) + hp_sdc.wcurr = 0; + goto finish; } while (act & HP_SDC_ACT_PRECMD) { @@ -409,9 +443,10 @@ unsigned long hp_sdc_put(void) { curr->idx++; /* act finished? */ if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_PRECMD) - goto actdone; + goto actdone; /* skip quantity field if data-out sequence follows. */ - if (act & HP_SDC_ACT_DATAOUT) curr->idx++; + if (act & HP_SDC_ACT_DATAOUT) + curr->idx++; goto finish; } if (act & HP_SDC_ACT_DATAOUT) { @@ -423,15 +458,15 @@ unsigned long hp_sdc_put(void) { hp_sdc_data_out8(curr->seq[curr->idx]); curr->idx++; /* act finished? */ - if ((curr->idx - idx >= qty) && - ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAOUT)) + if (curr->idx - idx >= qty && + (act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAOUT) goto actdone; goto finish; } idx += qty; act &= ~HP_SDC_ACT_DATAOUT; - } - else while (act & HP_SDC_ACT_DATAREG) { + } else + while (act & HP_SDC_ACT_DATAREG) { int mask; uint8_t w7[4]; @@ -445,26 +480,30 @@ unsigned long hp_sdc_put(void) { act &= ~HP_SDC_ACT_DATAREG; break; } - + w7[0] = (mask & 1) ? curr->seq[++idx] : hp_sdc.r7[0]; w7[1] = (mask & 2) ? curr->seq[++idx] : hp_sdc.r7[1]; w7[2] = (mask & 4) ? curr->seq[++idx] : hp_sdc.r7[2]; w7[3] = (mask & 8) ? curr->seq[++idx] : hp_sdc.r7[3]; - + if (hp_sdc.wi > 0x73 || hp_sdc.wi < 0x70 || - w7[hp_sdc.wi-0x70] == hp_sdc.r7[hp_sdc.wi-0x70]) { + w7[hp_sdc.wi - 0x70] == hp_sdc.r7[hp_sdc.wi - 0x70]) { int i = 0; - /* Need to point the write index register */ - while ((i < 4) && w7[i] == hp_sdc.r7[i]) i++; + /* Need to point the write index register */ + while (i < 4 && w7[i] == hp_sdc.r7[i]) + i++; + if (i < 4) { hp_sdc_status_out8(HP_SDC_CMD_SET_D0 + i); hp_sdc.wi = 0x70 + i; goto finish; } + idx++; if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAREG) goto actdone; + curr->idx = idx; act &= ~HP_SDC_ACT_DATAREG; break; @@ -476,12 +515,13 @@ unsigned long hp_sdc_put(void) { { int i = 0; - while ((i < 4) && w7[i] == hp_sdc.r7[i]) i++; + while ((i < 4) && w7[i] == hp_sdc.r7[i]) + i++; if (i >= 4) { curr->idx = idx + 1; - if ((act & HP_SDC_ACT_DURING) == + if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAREG) - goto actdone; + goto actdone; } } goto finish; @@ -497,7 +537,7 @@ unsigned long hp_sdc_put(void) { if (act & HP_SDC_ACT_POSTCMD) { - uint8_t postcmd; + uint8_t postcmd; /* curr->idx should == idx at this point. */ postcmd = curr->seq[idx]; @@ -505,12 +545,12 @@ unsigned long hp_sdc_put(void) { if (act & HP_SDC_ACT_DATAIN) { /* Start a new read */ - hp_sdc.rqty = curr->seq[curr->idx]; + hp_sdc.rqty = curr->seq[curr->idx]; do_gettimeofday(&hp_sdc.rtv); curr->idx++; /* Still need to lock here in case of spurious irq. */ write_lock_irq(&hp_sdc.rtq_lock); - hp_sdc.rcurr = curridx; + hp_sdc.rcurr = curridx; write_unlock_irq(&hp_sdc.rtq_lock); hp_sdc_status_out8(postcmd); goto finish; @@ -519,64 +559,69 @@ unsigned long hp_sdc_put(void) { goto actdone; } -actdone: - if (act & HP_SDC_ACT_SEMAPHORE) { + actdone: + if (act & HP_SDC_ACT_SEMAPHORE) up(curr->act.semaphore); - } - else if (act & HP_SDC_ACT_CALLBACK) { + else if (act & HP_SDC_ACT_CALLBACK) curr->act.irqhook(0,NULL,0,0); - } + if (curr->idx >= curr->endidx) { /* This transaction is over. */ - if (act & HP_SDC_ACT_DEALLOC) kfree(curr); + if (act & HP_SDC_ACT_DEALLOC) + kfree(curr); hp_sdc.tq[curridx] = NULL; - } - else { + } else { curr->actidx = idx + 1; curr->idx = idx + 2; } /* Interleave outbound data between the transactions. 
*/ hp_sdc.wcurr++; - if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) hp_sdc.wcurr = 0; + if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) + hp_sdc.wcurr = 0; finish: - /* If by some quirk IBF has cleared and our ISR has run to + /* If by some quirk IBF has cleared and our ISR has run to see that that has happened, do it all again. */ - if (!hp_sdc.ibf && limit++ < 20) goto anew; + if (!hp_sdc.ibf && limit++ < 20) + goto anew; done: - if (hp_sdc.wcurr >= 0) tasklet_schedule(&hp_sdc.task); + if (hp_sdc.wcurr >= 0) + tasklet_schedule(&hp_sdc.task); write_unlock(&hp_sdc.lock); + return 0; } /******* Functions called in either user or kernel context ****/ -int hp_sdc_enqueue_transaction(hp_sdc_transaction *this) { +int hp_sdc_enqueue_transaction(hp_sdc_transaction *this) +{ unsigned long flags; int i; if (this == NULL) { tasklet_schedule(&hp_sdc.task); return -EINVAL; - }; + } write_lock_irqsave(&hp_sdc.lock, flags); /* Can't have same transaction on queue twice */ - for (i=0; i < HP_SDC_QUEUE_LEN; i++) - if (hp_sdc.tq[i] == this) goto fail; + for (i = 0; i < HP_SDC_QUEUE_LEN; i++) + if (hp_sdc.tq[i] == this) + goto fail; this->actidx = 0; this->idx = 1; /* Search for empty slot */ - for (i=0; i < HP_SDC_QUEUE_LEN; i++) { + for (i = 0; i < HP_SDC_QUEUE_LEN; i++) if (hp_sdc.tq[i] == NULL) { hp_sdc.tq[i] = this; write_unlock_irqrestore(&hp_sdc.lock, flags); tasklet_schedule(&hp_sdc.task); return 0; } - } + write_unlock_irqrestore(&hp_sdc.lock, flags); printk(KERN_WARNING PREFIX "No free slot to add transaction.\n"); return -EBUSY; @@ -587,7 +632,8 @@ int hp_sdc_enqueue_transaction(hp_sdc_transaction *this) { return -EINVAL; } -int hp_sdc_dequeue_transaction(hp_sdc_transaction *this) { +int hp_sdc_dequeue_transaction(hp_sdc_transaction *this) +{ unsigned long flags; int i; @@ -595,8 +641,9 @@ int hp_sdc_dequeue_transaction(hp_sdc_transaction *this) { /* TODO: don't remove it if it's not done. 
*/ - for (i=0; i < HP_SDC_QUEUE_LEN; i++) - if (hp_sdc.tq[i] == this) hp_sdc.tq[i] = NULL; + for (i = 0; i < HP_SDC_QUEUE_LEN; i++) + if (hp_sdc.tq[i] == this) + hp_sdc.tq[i] = NULL; write_unlock_irqrestore(&hp_sdc.lock, flags); return 0; @@ -605,11 +652,11 @@ int hp_sdc_dequeue_transaction(hp_sdc_transaction *this) { /********************** User context functions **************************/ -int hp_sdc_request_timer_irq(hp_sdc_irqhook *callback) { - - if (callback == NULL || hp_sdc.dev == NULL) { +int hp_sdc_request_timer_irq(hp_sdc_irqhook *callback) +{ + if (callback == NULL || hp_sdc.dev == NULL) return -EINVAL; - } + write_lock_irq(&hp_sdc.hook_lock); if (hp_sdc.timer != NULL) { write_unlock_irq(&hp_sdc.hook_lock); @@ -629,11 +676,11 @@ int hp_sdc_request_timer_irq(hp_sdc_irqhook *callback) { return 0; } -int hp_sdc_request_hil_irq(hp_sdc_irqhook *callback) { - - if (callback == NULL || hp_sdc.dev == NULL) { +int hp_sdc_request_hil_irq(hp_sdc_irqhook *callback) +{ + if (callback == NULL || hp_sdc.dev == NULL) return -EINVAL; - } + write_lock_irq(&hp_sdc.hook_lock); if (hp_sdc.hil != NULL) { write_unlock_irq(&hp_sdc.hook_lock); @@ -650,11 +697,11 @@ int hp_sdc_request_hil_irq(hp_sdc_irqhook *callback) { return 0; } -int hp_sdc_request_cooked_irq(hp_sdc_irqhook *callback) { - - if (callback == NULL || hp_sdc.dev == NULL) { +int hp_sdc_request_cooked_irq(hp_sdc_irqhook *callback) +{ + if (callback == NULL || hp_sdc.dev == NULL) return -EINVAL; - } + write_lock_irq(&hp_sdc.hook_lock); if (hp_sdc.cooked != NULL) { write_unlock_irq(&hp_sdc.hook_lock); @@ -672,9 +719,8 @@ int hp_sdc_request_cooked_irq(hp_sdc_irqhook *callback) { return 0; } -int hp_sdc_release_timer_irq(hp_sdc_irqhook *callback) { - - +int hp_sdc_release_timer_irq(hp_sdc_irqhook *callback) +{ write_lock_irq(&hp_sdc.hook_lock); if ((callback != hp_sdc.timer) || (hp_sdc.timer == NULL)) { @@ -694,8 +740,8 @@ int hp_sdc_release_timer_irq(hp_sdc_irqhook *callback) { return 0; } -int hp_sdc_release_hil_irq(hp_sdc_irqhook *callback) { - +int hp_sdc_release_hil_irq(hp_sdc_irqhook *callback) +{ write_lock_irq(&hp_sdc.hook_lock); if ((callback != hp_sdc.hil) || (hp_sdc.hil == NULL)) { @@ -715,8 +761,8 @@ int hp_sdc_release_hil_irq(hp_sdc_irqhook *callback) { return 0; } -int hp_sdc_release_cooked_irq(hp_sdc_irqhook *callback) { - +int hp_sdc_release_cooked_irq(hp_sdc_irqhook *callback) +{ write_lock_irq(&hp_sdc.hook_lock); if ((callback != hp_sdc.cooked) || (hp_sdc.cooked == NULL)) { @@ -738,7 +784,8 @@ int hp_sdc_release_cooked_irq(hp_sdc_irqhook *callback) { /************************* Keepalive timer task *********************/ -void hp_sdc_kicker (unsigned long data) { +void hp_sdc_kicker (unsigned long data) +{ tasklet_schedule(&hp_sdc.task); /* Re-insert the periodic task. 
*/ mod_timer(&hp_sdc.kicker, jiffies + HZ); @@ -750,10 +797,10 @@ void hp_sdc_kicker (unsigned long data) { static const struct parisc_device_id hp_sdc_tbl[] = { { - .hw_type = HPHW_FIO, + .hw_type = HPHW_FIO, .hversion_rev = HVERSION_REV_ANY_ID, .hversion = HVERSION_ANY_ID, - .sversion = 0x73, + .sversion = 0x73, }, { 0, } }; @@ -772,16 +819,15 @@ static struct parisc_driver hp_sdc_driver = { static int __init hp_sdc_init(void) { - int i; char *errstr; hp_sdc_transaction t_sync; uint8_t ts_sync[6]; struct semaphore s_sync; - rwlock_init(&hp_sdc.lock); - rwlock_init(&hp_sdc.ibf_lock); - rwlock_init(&hp_sdc.rtq_lock); - rwlock_init(&hp_sdc.hook_lock); + rwlock_init(&hp_sdc.lock); + rwlock_init(&hp_sdc.ibf_lock); + rwlock_init(&hp_sdc.rtq_lock); + rwlock_init(&hp_sdc.hook_lock); hp_sdc.timer = NULL; hp_sdc.hil = NULL; @@ -796,7 +842,8 @@ static int __init hp_sdc_init(void) hp_sdc.r7[3] = 0xff; hp_sdc.ibf = 1; - for (i = 0; i < HP_SDC_QUEUE_LEN; i++) hp_sdc.tq[i] = NULL; + memset(&hp_sdc.tq, 0, sizeof(hp_sdc.tq)); + hp_sdc.wcurr = -1; hp_sdc.rcurr = -1; hp_sdc.rqty = 0; @@ -804,27 +851,32 @@ static int __init hp_sdc_init(void) hp_sdc.dev_err = -ENODEV; errstr = "IO not found for"; - if (!hp_sdc.base_io) goto err0; + if (!hp_sdc.base_io) + goto err0; errstr = "IRQ not found for"; - if (!hp_sdc.irq) goto err0; + if (!hp_sdc.irq) + goto err0; hp_sdc.dev_err = -EBUSY; #if defined(__hppa__) errstr = "IO not available for"; - if (request_region(hp_sdc.data_io, 2, hp_sdc_driver.name)) goto err0; -#endif + if (request_region(hp_sdc.data_io, 2, hp_sdc_driver.name)) + goto err0; +#endif errstr = "IRQ not available for"; if (request_irq(hp_sdc.irq, &hp_sdc_isr, IRQF_SHARED|IRQF_SAMPLE_RANDOM, - "HP SDC", &hp_sdc)) goto err1; + "HP SDC", &hp_sdc)) + goto err1; errstr = "NMI not available for"; if (request_irq(hp_sdc.nmi, &hp_sdc_nmisr, IRQF_SHARED, - "HP SDC NMI", &hp_sdc)) goto err2; + "HP SDC NMI", &hp_sdc)) + goto err2; - printk(KERN_INFO PREFIX "HP SDC at 0x%p, IRQ %d (NMI IRQ %d)\n", + printk(KERN_INFO PREFIX "HP SDC at 0x%p, IRQ %d (NMI IRQ %d)\n", (void *)hp_sdc.base_io, hp_sdc.irq, hp_sdc.nmi); hp_sdc_status_in8(); @@ -858,9 +910,10 @@ static int __init hp_sdc_init(void) err1: release_region(hp_sdc.data_io, 2); err0: - printk(KERN_WARNING PREFIX ": %s SDC IO=0x%p IRQ=0x%x NMI=0x%x\n", + printk(KERN_WARNING PREFIX ": %s SDC IO=0x%p IRQ=0x%x NMI=0x%x\n", errstr, (void *)hp_sdc.base_io, hp_sdc.irq, hp_sdc.nmi); hp_sdc.dev = NULL; + return hp_sdc.dev_err; } @@ -868,8 +921,10 @@ static int __init hp_sdc_init(void) static int __init hp_sdc_init_hppa(struct parisc_device *d) { - if (!d) return 1; - if (hp_sdc.dev != NULL) return 1; /* We only expect one SDC */ + if (!d) + return 1; + if (hp_sdc.dev != NULL) + return 1; /* We only expect one SDC */ hp_sdc.dev = d; hp_sdc.irq = d->irq; @@ -906,10 +961,8 @@ static void hp_sdc_exit(void) tasklet_kill(&hp_sdc.task); -/* release_region(hp_sdc.data_io, 2); */ - #if defined(__hppa__) - if (unregister_parisc_driver(&hp_sdc_driver)) + if (unregister_parisc_driver(&hp_sdc_driver)) printk(KERN_WARNING PREFIX "Error unregistering HP SDC"); #endif } @@ -923,7 +976,7 @@ static int __init hp_sdc_register(void) mm_segment_t fs; unsigned char i; #endif - + hp_sdc.dev = NULL; hp_sdc.dev_err = 0; #if defined(__hppa__) @@ -960,8 +1013,8 @@ static int __init hp_sdc_register(void) tq_init.seq = tq_init_seq; tq_init.act.semaphore = &tq_init_sem; - tq_init_seq[0] = - HP_SDC_ACT_POSTCMD | HP_SDC_ACT_DATAIN | HP_SDC_ACT_SEMAPHORE; + tq_init_seq[0] = + HP_SDC_ACT_POSTCMD | 
HP_SDC_ACT_DATAIN | HP_SDC_ACT_SEMAPHORE; tq_init_seq[1] = HP_SDC_CMD_READ_KCC; tq_init_seq[2] = 1; tq_init_seq[3] = 0; @@ -979,13 +1032,13 @@ static int __init hp_sdc_register(void) } hp_sdc.r11 = tq_init_seq[4]; if (hp_sdc.r11 & HP_SDC_CFG_NEW) { - char *str; + const char *str; printk(KERN_INFO PREFIX "New style SDC\n"); tq_init_seq[1] = HP_SDC_CMD_READ_XTD; tq_init.actidx = 0; tq_init.idx = 1; down(&tq_init_sem); - hp_sdc_enqueue_transaction(&tq_init); + hp_sdc_enqueue_transaction(&tq_init); down(&tq_init_sem); up(&tq_init_sem); if ((tq_init_seq[0] & HP_SDC_ACT_DEAD) == HP_SDC_ACT_DEAD) { @@ -995,15 +1048,13 @@ static int __init hp_sdc_register(void) hp_sdc.r7e = tq_init_seq[4]; HP_SDC_XTD_REV_STRINGS(hp_sdc.r7e & HP_SDC_XTD_REV, str) printk(KERN_INFO PREFIX "Revision: %s\n", str); - if (hp_sdc.r7e & HP_SDC_XTD_BEEPER) { + if (hp_sdc.r7e & HP_SDC_XTD_BEEPER) printk(KERN_INFO PREFIX "TI SN76494 beeper present\n"); - } - if (hp_sdc.r7e & HP_SDC_XTD_BBRTC) { + if (hp_sdc.r7e & HP_SDC_XTD_BBRTC) printk(KERN_INFO PREFIX "OKI MSM-58321 BBRTC present\n"); - } printk(KERN_INFO PREFIX "Spunking the self test register to force PUP " "on next firmware reset.\n"); - tq_init_seq[0] = HP_SDC_ACT_PRECMD | + tq_init_seq[0] = HP_SDC_ACT_PRECMD | HP_SDC_ACT_DATAOUT | HP_SDC_ACT_SEMAPHORE; tq_init_seq[1] = HP_SDC_CMD_SET_STR; tq_init_seq[2] = 1; @@ -1012,14 +1063,12 @@ static int __init hp_sdc_register(void) tq_init.idx = 1; tq_init.endidx = 4; down(&tq_init_sem); - hp_sdc_enqueue_transaction(&tq_init); + hp_sdc_enqueue_transaction(&tq_init); down(&tq_init_sem); up(&tq_init_sem); - } - else { - printk(KERN_INFO PREFIX "Old style SDC (1820-%s).\n", + } else + printk(KERN_INFO PREFIX "Old style SDC (1820-%s).\n", (hp_sdc.r11 & HP_SDC_CFG_REV) ? "3300" : "2564/3087"); - } return 0; } @@ -1027,13 +1076,13 @@ static int __init hp_sdc_register(void) module_init(hp_sdc_register); module_exit(hp_sdc_exit); -/* Timing notes: These measurements taken on my 64MHz 7100-LC (715/64) +/* Timing notes: These measurements taken on my 64MHz 7100-LC (715/64) * cycles cycles-adj time * between two consecutive mfctl(16)'s: 4 n/a 63ns * hp_sdc_spin_ibf when idle: 119 115 1.7us * gsc_writeb status register: 83 79 1.2us * IBF to clear after sending SET_IM: 6204 6006 93us - * IBF to clear after sending LOAD_RT: 4467 4352 68us + * IBF to clear after sending LOAD_RT: 4467 4352 68us * IBF to clear after sending two LOAD_RTs: 18974 18859 295us * READ_T1, read status/data, IRQ, call handler: 35564 n/a 556us * cmd to ~IBF READ_T1 2nd time right after: 5158403 n/a 81ms diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c index 1f131ff..cb0b288 100644 --- a/drivers/input/serio/hp_sdc_mlc.c +++ b/drivers/input/serio/hp_sdc_mlc.c @@ -58,12 +58,13 @@ struct hp_sdc_mlc_priv_s { } hp_sdc_mlc_priv; /************************* Interrupt context ******************************/ -static void hp_sdc_mlc_isr (int irq, void *dev_id, - uint8_t status, uint8_t data) { - int idx; +static void hp_sdc_mlc_isr (int irq, void *dev_id, + uint8_t status, uint8_t data) +{ + int idx; hil_mlc *mlc = &hp_sdc_mlc; - write_lock(&(mlc->lock)); + write_lock(&mlc->lock); if (mlc->icount < 0) { printk(KERN_WARNING PREFIX "HIL Overflow!\n"); up(&mlc->isem); @@ -73,239 +74,247 @@ static void hp_sdc_mlc_isr (int irq, void *dev_id, if ((status & HP_SDC_STATUS_IRQMASK) == HP_SDC_STATUS_HILDATA) { mlc->ipacket[idx] |= data | HIL_ERR_INT; mlc->icount--; - if (hp_sdc_mlc_priv.got5x) goto check; - if (!idx) goto check; - if ((mlc->ipacket[idx-1] & 
HIL_PKT_ADDR_MASK) != + if (hp_sdc_mlc_priv.got5x || !idx) + goto check; + if ((mlc->ipacket[idx - 1] & HIL_PKT_ADDR_MASK) != (mlc->ipacket[idx] & HIL_PKT_ADDR_MASK)) { mlc->ipacket[idx] &= ~HIL_PKT_ADDR_MASK; - mlc->ipacket[idx] |= (mlc->ipacket[idx-1] - & HIL_PKT_ADDR_MASK); + mlc->ipacket[idx] |= (mlc->ipacket[idx - 1] + & HIL_PKT_ADDR_MASK); } goto check; } /* We know status is 5X */ - if (data & HP_SDC_HIL_ISERR) goto err; - mlc->ipacket[idx] = + if (data & HP_SDC_HIL_ISERR) + goto err; + mlc->ipacket[idx] = (data & HP_SDC_HIL_R1MASK) << HIL_PKT_ADDR_SHIFT; hp_sdc_mlc_priv.got5x = 1; goto out; check: hp_sdc_mlc_priv.got5x = 0; - if (mlc->imatch == 0) goto done; - if ((mlc->imatch == (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_POL)) - && (mlc->ipacket[idx] == (mlc->imatch | idx))) goto done; - if (mlc->ipacket[idx] == mlc->imatch) goto done; + if (mlc->imatch == 0) + goto done; + if ((mlc->imatch == (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_POL)) + && (mlc->ipacket[idx] == (mlc->imatch | idx))) + goto done; + if (mlc->ipacket[idx] == mlc->imatch) + goto done; goto out; - err: + err: printk(KERN_DEBUG PREFIX "err code %x\n", data); + switch (data) { case HP_SDC_HIL_RC_DONE: printk(KERN_WARNING PREFIX "Bastard SDC reconfigured loop!\n"); break; + case HP_SDC_HIL_ERR: - mlc->ipacket[idx] |= HIL_ERR_INT | HIL_ERR_PERR | - HIL_ERR_FERR | HIL_ERR_FOF; + mlc->ipacket[idx] |= HIL_ERR_INT | HIL_ERR_PERR | + HIL_ERR_FERR | HIL_ERR_FOF; break; + case HP_SDC_HIL_TO: mlc->ipacket[idx] |= HIL_ERR_INT | HIL_ERR_LERR; break; + case HP_SDC_HIL_RC: printk(KERN_WARNING PREFIX "Bastard SDC decided to reconfigure loop!\n"); break; + default: printk(KERN_WARNING PREFIX "Unkown HIL Error status (%x)!\n", data); break; } + /* No more data will be coming due to an error. */ done: tasklet_schedule(mlc->tasklet); - up(&(mlc->isem)); + up(&mlc->isem); out: - write_unlock(&(mlc->lock)); + write_unlock(&mlc->lock); } /******************** Tasklet or userspace context functions ****************/ -static int hp_sdc_mlc_in (hil_mlc *mlc, suseconds_t timeout) { +static int hp_sdc_mlc_in(hil_mlc *mlc, suseconds_t timeout) +{ unsigned long flags; struct hp_sdc_mlc_priv_s *priv; int rc = 2; priv = mlc->priv; - write_lock_irqsave(&(mlc->lock), flags); + write_lock_irqsave(&mlc->lock, flags); /* Try to down the semaphore */ - if (down_trylock(&(mlc->isem))) { + if (down_trylock(&mlc->isem)) { struct timeval tv; if (priv->emtestmode) { - mlc->ipacket[0] = - HIL_ERR_INT | (mlc->opacket & - (HIL_PKT_CMD | - HIL_PKT_ADDR_MASK | + mlc->ipacket[0] = + HIL_ERR_INT | (mlc->opacket & + (HIL_PKT_CMD | + HIL_PKT_ADDR_MASK | HIL_PKT_DATA_MASK)); mlc->icount = 14; /* printk(KERN_DEBUG PREFIX ">[%x]\n", mlc->ipacket[0]); */ goto wasup; } do_gettimeofday(&tv); - tv.tv_usec += 1000000 * (tv.tv_sec - mlc->instart.tv_sec); + tv.tv_usec += USEC_PER_SEC * (tv.tv_sec - mlc->instart.tv_sec); if (tv.tv_usec - mlc->instart.tv_usec > mlc->intimeout) { - /* printk("!%i %i", - tv.tv_usec - mlc->instart.tv_usec, - mlc->intimeout); - */ + /* printk("!%i %i", + tv.tv_usec - mlc->instart.tv_usec, + mlc->intimeout); + */ rc = 1; - up(&(mlc->isem)); + up(&mlc->isem); } goto done; } wasup: - up(&(mlc->isem)); + up(&mlc->isem); rc = 0; goto done; done: - write_unlock_irqrestore(&(mlc->lock), flags); + write_unlock_irqrestore(&mlc->lock, flags); return rc; } -static int hp_sdc_mlc_cts (hil_mlc *mlc) { +static int hp_sdc_mlc_cts(hil_mlc *mlc) +{ struct hp_sdc_mlc_priv_s *priv; unsigned long flags; - priv = mlc->priv; + priv = mlc->priv; - write_lock_irqsave(&(mlc->lock), 
flags); + write_lock_irqsave(&mlc->lock, flags); /* Try to down the semaphores -- they should be up. */ - if (down_trylock(&(mlc->isem))) { - BUG(); - goto busy; - } - if (down_trylock(&(mlc->osem))) { - BUG(); - up(&(mlc->isem)); - goto busy; - } - up(&(mlc->isem)); - up(&(mlc->osem)); + BUG_ON(down_trylock(&mlc->isem)); + BUG_ON(down_trylock(&mlc->osem)); - if (down_trylock(&(mlc->csem))) { - if (priv->trans.act.semaphore != &(mlc->csem)) goto poll; - goto busy; + up(&mlc->isem); + up(&mlc->osem); + + if (down_trylock(&mlc->csem)) { + if (priv->trans.act.semaphore != &mlc->csem) + goto poll; + else + goto busy; } - if (!(priv->tseq[4] & HP_SDC_USE_LOOP)) goto done; + + if (!(priv->tseq[4] & HP_SDC_USE_LOOP)) + goto done; poll: - priv->trans.act.semaphore = &(mlc->csem); + priv->trans.act.semaphore = &mlc->csem; priv->trans.actidx = 0; priv->trans.idx = 1; priv->trans.endidx = 5; - priv->tseq[0] = + priv->tseq[0] = HP_SDC_ACT_POSTCMD | HP_SDC_ACT_DATAIN | HP_SDC_ACT_SEMAPHORE; priv->tseq[1] = HP_SDC_CMD_READ_USE; priv->tseq[2] = 1; priv->tseq[3] = 0; priv->tseq[4] = 0; - hp_sdc_enqueue_transaction(&(priv->trans)); + hp_sdc_enqueue_transaction(&priv->trans); busy: - write_unlock_irqrestore(&(mlc->lock), flags); + write_unlock_irqrestore(&mlc->lock, flags); return 1; done: - priv->trans.act.semaphore = &(mlc->osem); - up(&(mlc->csem)); - write_unlock_irqrestore(&(mlc->lock), flags); + priv->trans.act.semaphore = &mlc->osem; + up(&mlc->csem); + write_unlock_irqrestore(&mlc->lock, flags); return 0; } -static void hp_sdc_mlc_out (hil_mlc *mlc) { +static void hp_sdc_mlc_out(hil_mlc *mlc) +{ struct hp_sdc_mlc_priv_s *priv; unsigned long flags; priv = mlc->priv; - write_lock_irqsave(&(mlc->lock), flags); - + write_lock_irqsave(&mlc->lock, flags); + /* Try to down the semaphore -- it should be up. */ - if (down_trylock(&(mlc->osem))) { - BUG(); - goto done; - } + BUG_ON(down_trylock(&mlc->osem)); - if (mlc->opacket & HIL_DO_ALTER_CTRL) goto do_control; + if (mlc->opacket & HIL_DO_ALTER_CTRL) + goto do_control; do_data: if (priv->emtestmode) { - up(&(mlc->osem)); + up(&mlc->osem); goto done; } /* Shouldn't be sending commands when loop may be busy */ - if (down_trylock(&(mlc->csem))) { - BUG(); - goto done; - } - up(&(mlc->csem)); + BUG_ON(down_trylock(&mlc->csem)); + up(&mlc->csem); priv->trans.actidx = 0; priv->trans.idx = 1; - priv->trans.act.semaphore = &(mlc->osem); + priv->trans.act.semaphore = &mlc->osem; priv->trans.endidx = 6; - priv->tseq[0] = + priv->tseq[0] = HP_SDC_ACT_DATAREG | HP_SDC_ACT_POSTCMD | HP_SDC_ACT_SEMAPHORE; priv->tseq[1] = 0x7; - priv->tseq[2] = - (mlc->opacket & + priv->tseq[2] = + (mlc->opacket & (HIL_PKT_ADDR_MASK | HIL_PKT_CMD)) >> HIL_PKT_ADDR_SHIFT; - priv->tseq[3] = - (mlc->opacket & HIL_PKT_DATA_MASK) + priv->tseq[3] = + (mlc->opacket & HIL_PKT_DATA_MASK) >> HIL_PKT_DATA_SHIFT; priv->tseq[4] = 0; /* No timeout */ - if (priv->tseq[3] == HIL_CMD_DHR) priv->tseq[4] = 1; + if (priv->tseq[3] == HIL_CMD_DHR) + priv->tseq[4] = 1; priv->tseq[5] = HP_SDC_CMD_DO_HIL; goto enqueue; do_control: priv->emtestmode = mlc->opacket & HIL_CTRL_TEST; - + /* we cannot emulate this, it should not be used. 
*/ BUG_ON((mlc->opacket & (HIL_CTRL_APE | HIL_CTRL_IPF)) == HIL_CTRL_APE); - - if ((mlc->opacket & HIL_CTRL_ONLY) == HIL_CTRL_ONLY) goto control_only; - if (mlc->opacket & HIL_CTRL_APE) { - BUG(); /* Should not send command/data after engaging APE */ - goto done; - } - /* Disengaging APE this way would not be valid either since + + if ((mlc->opacket & HIL_CTRL_ONLY) == HIL_CTRL_ONLY) + goto control_only; + + /* Should not send command/data after engaging APE */ + BUG_ON(mlc->opacket & HIL_CTRL_APE); + + /* Disengaging APE this way would not be valid either since * the loop must be allowed to idle. * - * So, it works out that we really never actually send control - * and data when using SDC, we just send the data. + * So, it works out that we really never actually send control + * and data when using SDC, we just send the data. */ goto do_data; control_only: priv->trans.actidx = 0; priv->trans.idx = 1; - priv->trans.act.semaphore = &(mlc->osem); + priv->trans.act.semaphore = &mlc->osem; priv->trans.endidx = 4; - priv->tseq[0] = + priv->tseq[0] = HP_SDC_ACT_PRECMD | HP_SDC_ACT_DATAOUT | HP_SDC_ACT_SEMAPHORE; priv->tseq[1] = HP_SDC_CMD_SET_LPC; priv->tseq[2] = 1; - // priv->tseq[3] = (mlc->ddc + 1) | HP_SDC_LPS_ACSUCC; + /* priv->tseq[3] = (mlc->ddc + 1) | HP_SDC_LPS_ACSUCC; */ priv->tseq[3] = 0; if (mlc->opacket & HIL_CTRL_APE) { priv->tseq[3] |= HP_SDC_LPC_APE_IPF; - down_trylock(&(mlc->csem)); - } + down_trylock(&mlc->csem); + } enqueue: - hp_sdc_enqueue_transaction(&(priv->trans)); + hp_sdc_enqueue_transaction(&priv->trans); done: - write_unlock_irqrestore(&(mlc->lock), flags); + write_unlock_irqrestore(&mlc->lock, flags); } static int __init hp_sdc_mlc_init(void) @@ -316,14 +325,13 @@ static int __init hp_sdc_mlc_init(void) hp_sdc_mlc_priv.emtestmode = 0; hp_sdc_mlc_priv.trans.seq = hp_sdc_mlc_priv.tseq; - hp_sdc_mlc_priv.trans.act.semaphore = &(mlc->osem); + hp_sdc_mlc_priv.trans.act.semaphore = &mlc->osem; hp_sdc_mlc_priv.got5x = 0; - mlc->cts = &hp_sdc_mlc_cts; - mlc->in = &hp_sdc_mlc_in; - mlc->out = &hp_sdc_mlc_out; - - mlc->priv = &hp_sdc_mlc_priv; + mlc->cts = &hp_sdc_mlc_cts; + mlc->in = &hp_sdc_mlc_in; + mlc->out = &hp_sdc_mlc_out; + mlc->priv = &hp_sdc_mlc_priv; if (hil_mlc_register(mlc)) { printk(KERN_WARNING PREFIX "Failed to register MLC structure with hil_mlc\n"); @@ -336,10 +344,9 @@ static int __init hp_sdc_mlc_init(void) } return 0; err1: - if (hil_mlc_unregister(mlc)) { + if (hil_mlc_unregister(mlc)) printk(KERN_ERR PREFIX "Failed to unregister MLC structure with hil_mlc.\n" "This is bad. Could cause an oops.\n"); - } err0: return -EBUSY; } @@ -347,14 +354,14 @@ static int __init hp_sdc_mlc_init(void) static void __exit hp_sdc_mlc_exit(void) { hil_mlc *mlc = &hp_sdc_mlc; - if (hp_sdc_release_hil_irq(&hp_sdc_mlc_isr)) { + + if (hp_sdc_release_hil_irq(&hp_sdc_mlc_isr)) printk(KERN_ERR PREFIX "Failed to release the raw HIL ISR hook.\n" "This is bad. Could cause an oops.\n"); - } - if (hil_mlc_unregister(mlc)) { + + if (hil_mlc_unregister(mlc)) printk(KERN_ERR PREFIX "Failed to unregister MLC structure with hil_mlc.\n" "This is bad. Could cause an oops.\n"); - } } module_init(hp_sdc_mlc_init); -- cgit v0.10.2 From 243db53bbd8503065b21fd6e8265387048eb569b Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 7 Mar 2007 01:44:59 -0500 Subject: Input: psmouse - do not force stream mode Forcing stream mode after reset confuses some devices (reported by Andrea Arcangeli) so let's take it out - spec says that after reset mouse should already be in stream mode. 
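For reference, the step being removed is a single explicit command. A minimal sketch of what forcing stream mode by hand looks like is shown below; the helper name and error handling are illustrative only, since the patch itself simply deletes the ps2_command() call shown in the diff.

#include <linux/libps2.h>
#include "psmouse.h"

/* Illustrative only: explicitly request stream mode. The patch drops
 * this step because a PS/2 mouse is already in stream mode after a
 * reset, so the extra command is redundant and confuses some devices.
 */
static int psmouse_force_stream_mode(struct psmouse *psmouse)
{
	/* PSMOUSE_CMD_SETSTREAM carries no parameter bytes. */
	if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSTREAM))
		return -EIO;	/* device did not acknowledge */

	return 0;
}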
Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index eb63855..33a3251 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -836,12 +836,6 @@ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate) static void psmouse_initialize(struct psmouse *psmouse) { /* - * We set the mouse into streaming mode. - */ - - ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSTREAM); - -/* * We set the mouse report rate, resolution and scaling. */ -- cgit v0.10.2 From bc413c9563db6d596e841b2756ed3fccc48de5c0 Mon Sep 17 00:00:00 2001 From: Eric Piel Date: Wed, 7 Mar 2007 01:45:16 -0500 Subject: Input: wistron - add support for TravelMate 610 Add support for Acer TravelMate 610 to wistron_btns. All special keys are detected, but the 2 leds are not handled (yet). Signed-off-by: Eric Piel Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c index e1183ae..39e43759 100644 --- a/drivers/input/misc/wistron_btns.c +++ b/drivers/input/misc/wistron_btns.c @@ -324,6 +324,21 @@ static struct key_entry keymap_acer_travelmate_240[] = { { KE_END, 0 } }; +/* Wifi subsystem only activates the led. Therefore we need to pass + * wifi event as a normal key, then userspace can really change the wifi state. + * TODO we need to export led state to userspace (wifi and mail) */ +static struct key_entry keymap_acer_travelmate_610[] = { + { KE_KEY, 0x01, KEY_HELP }, + { KE_KEY, 0x02, KEY_CONFIG }, + { KE_KEY, 0x11, KEY_PROG1 }, + { KE_KEY, 0x12, KEY_PROG2 }, + { KE_KEY, 0x13, KEY_PROG3 }, + { KE_KEY, 0x14, KEY_MAIL }, + { KE_KEY, 0x15, KEY_WWW }, + { KE_KEY, 0x40, KEY_WLAN }, /* Wifi */ + { KE_END, 0 } +}; + static struct key_entry keymap_aopen_1559as[] = { { KE_KEY, 0x01, KEY_HELP }, { KE_KEY, 0x06, KEY_PROG3 }, @@ -408,6 +423,15 @@ static struct dmi_system_id dmi_ids[] __initdata = { }, { .callback = dmi_matched, + .ident = "Acer TravelMate 610", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ACER"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 610"), + }, + .driver_data = keymap_acer_travelmate_610 + }, + { + .callback = dmi_matched, .ident = "AOpen 1559AS", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "E2U"), -- cgit v0.10.2 From 15e03ae811475c2beebfde18717935ee9ce64617 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 7 Mar 2007 23:20:17 -0500 Subject: Input: export 'uniq' in /proc/bus/input/devices Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/input.c b/drivers/input/input.c index a9a706f..9b3bfce 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -439,6 +439,7 @@ static int input_devices_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : ""); seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : ""); seq_printf(seq, "S: Sysfs=%s\n", path ? path : ""); + seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : ""); seq_printf(seq, "H: Handlers="); list_for_each_entry(handle, &dev->h_list, d_node) -- cgit v0.10.2 From cb9def4dff9fe7e3d3114eba4e2d89f52265e22c Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 7 Mar 2007 23:20:26 -0500 Subject: Input: let driver core create class device attribute groups Rely on device core to create attribute groups for input devices instead of open-coding it. 
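The idiom relied on here is sketched below: the attribute groups are collected in a NULL-terminated array and handed to the driver core through the class device's groups pointer before registration, so the core creates the sysfs groups when the class device is added and tears them down again on removal. The names mirror the ones added by the patch; treat the fragment as an illustration of the mechanism, not as extra code to apply.

#include <linux/device.h>
#include <linux/input.h>

/* Condensed fragment of drivers/input/input.c after the patch: the
 * three existing groups are listed once, NULL-terminated as the
 * driver core expects.
 */
static struct attribute_group *input_dev_attr_groups[] = {
	&input_dev_attr_group,
	&input_dev_id_attr_group,
	&input_dev_caps_attr_group,
	NULL
};

static void example_init_cdev(struct input_dev *dev)
{
	/* Point the class device at the array before it is registered;
	 * the explicit sysfs_create_group()/sysfs_remove_group() calls
	 * in input_register_device()/input_unregister_device() can
	 * then go away.
	 */
	dev->cdev.groups = input_dev_attr_groups;
}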
Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/input.c b/drivers/input/input.c index 9b3bfce..4486402 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -754,6 +754,13 @@ static struct attribute_group input_dev_caps_attr_group = { .attrs = input_dev_caps_attrs, }; +static struct attribute_group *input_dev_attr_groups[] = { + &input_dev_attr_group, + &input_dev_id_attr_group, + &input_dev_caps_attr_group, + NULL +}; + static void input_dev_release(struct class_device *class_dev) { struct input_dev *dev = to_input_dev(class_dev); @@ -907,6 +914,7 @@ struct input_dev *input_allocate_device(void) dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL); if (dev) { dev->cdev.class = &input_class; + dev->cdev.groups = input_dev_attr_groups; class_device_initialize(&dev->cdev); mutex_init(&dev->mutex); INIT_LIST_HEAD(&dev->h_list); @@ -979,18 +987,6 @@ int input_register_device(struct input_dev *dev) if (error) return error; - error = sysfs_create_group(&dev->cdev.kobj, &input_dev_attr_group); - if (error) - goto fail1; - - error = sysfs_create_group(&dev->cdev.kobj, &input_dev_id_attr_group); - if (error) - goto fail2; - - error = sysfs_create_group(&dev->cdev.kobj, &input_dev_caps_attr_group); - if (error) - goto fail3; - path = kobject_get_path(&dev->cdev.kobj, GFP_KERNEL); printk(KERN_INFO "input: %s as %s\n", dev->name ? dev->name : "Unspecified device", path ? path : "N/A"); @@ -1008,11 +1004,6 @@ int input_register_device(struct input_dev *dev) input_wakeup_procfs_readers(); return 0; - - fail3: sysfs_remove_group(&dev->cdev.kobj, &input_dev_id_attr_group); - fail2: sysfs_remove_group(&dev->cdev.kobj, &input_dev_attr_group); - fail1: class_device_del(&dev->cdev); - return error; } EXPORT_SYMBOL(input_register_device); @@ -1037,10 +1028,6 @@ void input_unregister_device(struct input_dev *dev) list_del_init(&dev->node); - sysfs_remove_group(&dev->cdev.kobj, &input_dev_caps_attr_group); - sysfs_remove_group(&dev->cdev.kobj, &input_dev_id_attr_group); - sysfs_remove_group(&dev->cdev.kobj, &input_dev_attr_group); - class_device_unregister(&dev->cdev); input_wakeup_procfs_readers(); -- cgit v0.10.2 From ba863a0016a33637acc7888698a5d75096fcec05 Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Fri, 9 Mar 2007 10:27:31 -0600 Subject: JFS: document uid, gid, and umask mount options in jfs.txt Signed-off-by: Dave Kleikamp diff --git a/Documentation/filesystems/jfs.txt b/Documentation/filesystems/jfs.txt index bae1286..26ebde7 100644 --- a/Documentation/filesystems/jfs.txt +++ b/Documentation/filesystems/jfs.txt @@ -29,7 +29,13 @@ errors=continue Keep going on a filesystem error. errors=remount-ro Default. Remount the filesystem read-only on an error. errors=panic Panic and halt the machine if an error occurs. -Please send bugs, comments, cards and letters to shaggy@austin.ibm.com. +uid=value Override on-disk uid with specified value +gid=value Override on-disk gid with specified value +umask=value Override on-disk umask with specified octal value. For + directories, the execute bit will be set if the corresponding + read bit is set. + +Please send bugs, comments, cards and letters to shaggy@linux.vnet.ibm.com. The JFS mailing list can be subscribed to by using the link labeled "Mail list Subscribe" at our web page http://jfs.sourceforge.net/ -- cgit v0.10.2 From 3038e353cfaf548eb94f02b172b9dbe412abd24c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 19 Dec 2006 19:58:27 -0500 Subject: firewire: Add core firewire stack. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Kristian Høgsberg Signed-off-by: Stefan Richter diff --git a/drivers/Kconfig b/drivers/Kconfig index 050323f..9c52a04 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -30,6 +30,8 @@ source "drivers/md/Kconfig" source "drivers/message/fusion/Kconfig" +source "drivers/firewire/Kconfig" + source "drivers/ieee1394/Kconfig" source "drivers/message/i2o/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index 3a718f5..57d92da 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_FC4) += fc4/ obj-$(CONFIG_SCSI) += scsi/ obj-$(CONFIG_ATA) += ata/ obj-$(CONFIG_FUSION) += message/ +obj-$(CONFIG_FW) += firewire/ obj-$(CONFIG_IEEE1394) += ieee1394/ obj-y += cdrom/ obj-y += auxdisplay/ diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig new file mode 100644 index 0000000..bdd6303 --- /dev/null +++ b/drivers/firewire/Kconfig @@ -0,0 +1,23 @@ +# -*- shell-script -*- + +menu "IEEE 1394 (FireWire) support (JUJU alternative stack)" + +config FW + tristate "IEEE 1394 (FireWire) support (JUJU alternative stack)" + help + IEEE 1394 describes a high performance serial bus, which is also + known as FireWire(tm) or i.Link(tm) and is used for connecting all + sorts of devices (most notably digital video cameras) to your + computer. + + If you have FireWire hardware and want to use it, say Y here. This + is the core support only, you will also need to select a driver for + your IEEE 1394 adapter. + + This is the "JUJU" firewire stack, an alternative + implementation designed for roboustness and simplicity. + + To compile this driver as a module, say M here: the + module will be called fw-core. + +endmenu diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile new file mode 100644 index 0000000..db7020d --- /dev/null +++ b/drivers/firewire/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Linux IEEE 1394 implementation +# + +fw-core-objs := fw-card.o fw-topology.o fw-transaction.o fw-iso.o + +obj-$(CONFIG_FW) += fw-core.o diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c new file mode 100644 index 0000000..d8abd70 --- /dev/null +++ b/drivers/firewire/fw-card.c @@ -0,0 +1,384 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-card.c - card level functions + * + * Copyright (C) 2005-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include "fw-transaction.h" +#include "fw-topology.h" + +/* The lib/crc16.c implementation uses the standard (0x8005) + * polynomial, but we need the ITU-T (or CCITT) polynomial (0x1021). + * The implementation below works on an array of host-endian u32 + * words, assuming they'll be transmited msb first. 
*/ +static u16 +crc16_itu_t(const u32 *buffer, size_t length) +{ + int shift, i; + u32 data; + u16 sum, crc = 0; + + for (i = 0; i < length; i++) { + data = *buffer++; + for (shift = 28; shift >= 0; shift -= 4 ) { + sum = ((crc >> 12) ^ (data >> shift)) & 0xf; + crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum); + } + crc &= 0xffff; + } + + return crc; +} + +static LIST_HEAD(card_list); + +static LIST_HEAD(descriptor_list); +static int descriptor_count; + +#define bib_crc(v) ((v) << 0) +#define bib_crc_length(v) ((v) << 16) +#define bib_info_length(v) ((v) << 24) + +#define bib_link_speed(v) ((v) << 0) +#define bib_generation(v) ((v) << 4) +#define bib_max_rom(v) ((v) << 8) +#define bib_max_receive(v) ((v) << 12) +#define bib_cyc_clk_acc(v) ((v) << 16) +#define bib_pmc ((1) << 27) +#define bib_bmc ((1) << 28) +#define bib_isc ((1) << 29) +#define bib_cmc ((1) << 30) +#define bib_imc ((1) << 31) + +static u32 * +generate_config_rom (struct fw_card *card, size_t *config_rom_length) +{ + struct fw_descriptor *desc; + static u32 config_rom[256]; + int i, j, length; + + /* Initialize contents of config rom buffer. On the OHCI + * controller, block reads to the config rom accesses the host + * memory, but quadlet read access the hardware bus info block + * registers. That's just crack, but it means we should make + * sure the contents of bus info block in host memory mathces + * the version stored in the OHCI registers. */ + + memset(config_rom, 0, sizeof config_rom); + config_rom[0] = bib_crc_length(4) | bib_info_length(4) | bib_crc(0); + config_rom[1] = 0x31333934; + + config_rom[2] = + bib_link_speed(card->link_speed) | + bib_generation(card->config_rom_generation++ % 14 + 2) | + bib_max_rom(2) | + bib_max_receive(card->max_receive) | + bib_isc | bib_cmc | bib_imc; + config_rom[3] = card->guid >> 32; + config_rom[4] = card->guid; + + /* Generate root directory. */ + i = 5; + config_rom[i++] = 0; + config_rom[i++] = 0x0c0083c0; /* node capabilities */ + config_rom[i++] = 0x03d00d1e; /* vendor id */ + j = i + descriptor_count; + + /* Generate root directory entries for descriptors. */ + list_for_each_entry (desc, &descriptor_list, link) { + config_rom[i] = desc->key | (j - i); + i++; + j += desc->length; + } + + /* Update root directory length. */ + config_rom[5] = (i - 5 - 1) << 16; + + /* End of root directory, now copy in descriptors. */ + list_for_each_entry (desc, &descriptor_list, link) { + memcpy(&config_rom[i], desc->data, desc->length * 4); + i += desc->length; + } + + /* Calculate CRCs for all blocks in the config rom. This + * assumes that CRC length and info length are identical for + * the bus info block, which is always the case for this + * implementation. */ + for (i = 0; i < j; i += length + 1) { + length = (config_rom[i] >> 16) & 0xff; + config_rom[i] |= crc16_itu_t(&config_rom[i + 1], length); + } + + *config_rom_length = j; + + return config_rom; +} + +static void +update_config_roms (void) +{ + struct fw_card *card; + u32 *config_rom; + size_t length; + + list_for_each_entry (card, &card_list, link) { + config_rom = generate_config_rom(card, &length); + card->driver->set_config_rom(card, config_rom, length); + } +} + +int +fw_core_add_descriptor (struct fw_descriptor *desc) +{ + size_t i; + + /* Check descriptor is valid; the length of all blocks in the + * descriptor has to add up to exactly the length of the + * block. 
*/ + i = 0; + while (i < desc->length) + i += (desc->data[i] >> 16) + 1; + + if (i != desc->length) + return -1; + + down_write(&fw_bus_type.subsys.rwsem); + + list_add_tail (&desc->link, &descriptor_list); + descriptor_count++; + update_config_roms(); + + up_write(&fw_bus_type.subsys.rwsem); + + return 0; +} +EXPORT_SYMBOL(fw_core_add_descriptor); + +void +fw_core_remove_descriptor (struct fw_descriptor *desc) +{ + down_write(&fw_bus_type.subsys.rwsem); + + list_del(&desc->link); + descriptor_count--; + update_config_roms(); + + up_write(&fw_bus_type.subsys.rwsem); +} +EXPORT_SYMBOL(fw_core_remove_descriptor); + +static void +release_card(struct device *device) +{ + struct fw_card *card = + container_of(device, struct fw_card, card_device); + + kfree(card); +} + +static void +flush_timer_callback(unsigned long data) +{ + struct fw_card *card = (struct fw_card *)data; + + fw_flush_transactions(card); +} + +void +fw_card_initialize(struct fw_card *card, struct fw_card_driver *driver, + struct device *device) +{ + static int index; + + card->index = index++; + card->driver = driver; + card->device = device; + card->current_tlabel = 0; + card->tlabel_mask = 0; + card->color = 0; + + INIT_LIST_HEAD(&card->transaction_list); + spin_lock_init(&card->lock); + setup_timer(&card->flush_timer, + flush_timer_callback, (unsigned long)card); + + card->local_node = NULL; + + card->card_device.bus = &fw_bus_type; + card->card_device.release = release_card; + card->card_device.parent = card->device; + snprintf(card->card_device.bus_id, sizeof card->card_device.bus_id, + "fwcard%d", card->index); + + device_initialize(&card->card_device); +} +EXPORT_SYMBOL(fw_card_initialize); + +int +fw_card_add(struct fw_card *card, + u32 max_receive, u32 link_speed, u64 guid) +{ + int retval; + u32 *config_rom; + size_t length; + + card->max_receive = max_receive; + card->link_speed = link_speed; + card->guid = guid; + + /* FIXME: add #define's for phy registers. */ + /* Activate link_on bit and contender bit in our self ID packets.*/ + if (card->driver->update_phy_reg(card, 4, 0, 0x80 | 0x40) < 0) + return -EIO; + + retval = device_add(&card->card_device); + if (retval < 0) { + fw_error("Failed to register card device."); + return retval; + } + + /* The subsystem grabs a reference when the card is added and + * drops it when the driver calls fw_core_remove_card. */ + fw_card_get(card); + + down_write(&fw_bus_type.subsys.rwsem); + config_rom = generate_config_rom (card, &length); + list_add_tail(&card->link, &card_list); + up_write(&fw_bus_type.subsys.rwsem); + + return card->driver->enable(card, config_rom, length); +} +EXPORT_SYMBOL(fw_card_add); + + +/* The next few functions implements a dummy driver that use once a + * card driver shuts down an fw_card. This allows the driver to + * cleanly unload, as all IO to the card will be handled by the dummy + * driver instead of calling into the (possibly) unloaded module. The + * dummy driver just fails all IO. */ + +static int +dummy_enable(struct fw_card *card, u32 *config_rom, size_t length) +{ + BUG(); + return -1; +} + +static int +dummy_update_phy_reg(struct fw_card *card, int address, + int clear_bits, int set_bits) +{ + return -ENODEV; +} + +static int +dummy_set_config_rom(struct fw_card *card, + u32 *config_rom, size_t length) +{ + /* We take the card out of card_list before setting the dummy + * driver, so this should never get called. 
*/ + BUG(); + return -1; +} + +static void +dummy_send_request(struct fw_card *card, struct fw_packet *packet) +{ + packet->callback(packet, card, -ENODEV); +} + +static void +dummy_send_response(struct fw_card *card, struct fw_packet *packet) +{ + packet->callback(packet, card, -ENODEV); +} + +static int +dummy_enable_phys_dma(struct fw_card *card, + int node_id, int generation) +{ + return -ENODEV; +} + +static struct fw_card_driver dummy_driver = { + .name = "dummy", + .enable = dummy_enable, + .update_phy_reg = dummy_update_phy_reg, + .set_config_rom = dummy_set_config_rom, + .send_request = dummy_send_request, + .send_response = dummy_send_response, + .enable_phys_dma = dummy_enable_phys_dma +}; + +void +fw_core_remove_card(struct fw_card *card) +{ + card->driver->update_phy_reg(card, 4, 0x80 | 0x40, 0); + fw_core_initiate_bus_reset(card, 1); + + down_write(&fw_bus_type.subsys.rwsem); + list_del(&card->link); + up_write(&fw_bus_type.subsys.rwsem); + + /* Set up the dummy driver. */ + card->driver = &dummy_driver; + + fw_flush_transactions(card); + + fw_destroy_nodes(card); + + /* This also drops the subsystem reference. */ + device_unregister(&card->card_device); +} +EXPORT_SYMBOL(fw_core_remove_card); + +struct fw_card * +fw_card_get(struct fw_card *card) +{ + get_device(&card->card_device); + + return card; +} +EXPORT_SYMBOL(fw_card_get); + +/* An assumption for fw_card_put() is that the card driver allocates + * the fw_card struct with kalloc and that it has been shut down + * before the last ref is dropped. */ +void +fw_card_put(struct fw_card *card) +{ + put_device(&card->card_device); +} +EXPORT_SYMBOL(fw_card_put); + +int +fw_core_initiate_bus_reset(struct fw_card *card, int short_reset) +{ + u32 address; + + if (short_reset) + address = 5; + else + address = 1; + + return card->driver->update_phy_reg(card, address, 0, 0x40); +} +EXPORT_SYMBOL(fw_core_initiate_bus_reset); diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c new file mode 100644 index 0000000..61548c4 --- /dev/null +++ b/drivers/firewire/fw-iso.c @@ -0,0 +1,136 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-iso.c - Isochronous IO + * Copyright (C) 2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include +#include +#include +#include +#include + +#include "fw-transaction.h" +#include "fw-topology.h" + +static int +setup_iso_buffer(struct fw_iso_context *ctx, size_t size, + enum dma_data_direction direction) +{ + struct page *page; + int i; + void *p; + + ctx->buffer_size = PAGE_ALIGN(size); + if (size == 0) + return 0; + + ctx->buffer = vmalloc_32_user(ctx->buffer_size); + if (ctx->buffer == NULL) + return -ENOMEM; + + ctx->page_count = ctx->buffer_size >> PAGE_SHIFT; + ctx->pages = + kzalloc(ctx->page_count * sizeof(ctx->pages[0]), GFP_KERNEL); + if (ctx->pages == NULL) { + vfree(ctx->buffer); + return -ENOMEM; + } + + p = ctx->buffer; + for (i = 0; i < ctx->page_count; i++, p += PAGE_SIZE) { + page = vmalloc_to_page(p); + ctx->pages[i] = dma_map_page(ctx->card->device, + page, 0, PAGE_SIZE, direction); + } + + return 0; +} + +static void destroy_iso_buffer(struct fw_iso_context *ctx) +{ + int i; + + for (i = 0; i < ctx->page_count; i++) + dma_unmap_page(ctx->card->device, ctx->pages[i], + PAGE_SIZE, DMA_TO_DEVICE); + + kfree(ctx->pages); + vfree(ctx->buffer); +} + +struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type, + size_t buffer_size, + fw_iso_callback_t callback, + void *callback_data) +{ + struct fw_iso_context *ctx; + int retval; + + ctx = card->driver->allocate_iso_context(card, type); + if (IS_ERR(ctx)) + return ctx; + + ctx->card = card; + ctx->type = type; + ctx->callback = callback; + ctx->callback_data = callback_data; + + retval = setup_iso_buffer(ctx, buffer_size, DMA_TO_DEVICE); + if (retval < 0) { + card->driver->free_iso_context(ctx); + return ERR_PTR(retval); + } + + return ctx; +} + +EXPORT_SYMBOL(fw_iso_context_create); + +void fw_iso_context_destroy(struct fw_iso_context *ctx) +{ + struct fw_card *card = ctx->card; + + destroy_iso_buffer(ctx); + + card->driver->free_iso_context(ctx); +} + +EXPORT_SYMBOL(fw_iso_context_destroy); + +int +fw_iso_context_send(struct fw_iso_context *ctx, + int channel, int speed, int cycle) +{ + ctx->channel = channel; + ctx->speed = speed; + + return ctx->card->driver->send_iso(ctx, cycle); +} + +EXPORT_SYMBOL(fw_iso_context_send); + +int +fw_iso_context_queue(struct fw_iso_context *ctx, + struct fw_iso_packet *packet, void *payload) +{ + struct fw_card *card = ctx->card; + + return card->driver->queue_iso(ctx, packet, payload); +} + +EXPORT_SYMBOL(fw_iso_context_queue); diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c new file mode 100644 index 0000000..2778aa3 --- /dev/null +++ b/drivers/firewire/fw-topology.c @@ -0,0 +1,446 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-topology.c - Incremental bus scan, based on bus topology + * + * Copyright (C) 2004-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include +#include +#include +#include "fw-transaction.h" +#include "fw-topology.h" + +#define self_id_phy_id(q) (((q) >> 24) & 0x3f) +#define self_id_extended(q) (((q) >> 23) & 0x01) +#define self_id_link_on(q) (((q) >> 22) & 0x01) +#define self_id_gap_count(q) (((q) >> 16) & 0x3f) +#define self_id_phy_speed(q) (((q) >> 14) & 0x03) +#define self_id_contender(q) (((q) >> 11) & 0x01) +#define self_id_phy_initiator(q) (((q) >> 1) & 0x01) +#define self_id_more_packets(q) (((q) >> 0) & 0x01) + +#define self_id_ext_sequence(q) (((q) >> 20) & 0x07) + +static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count) +{ + u32 q; + int port_type, shift, seq; + + *total_port_count = 0; + *child_port_count = 0; + + shift = 6; + q = *sid; + seq = 0; + + while (1) { + port_type = (q >> shift) & 0x03; + switch (port_type) { + case SELFID_PORT_CHILD: + (*child_port_count)++; + case SELFID_PORT_PARENT: + case SELFID_PORT_NCONN: + (*total_port_count)++; + case SELFID_PORT_NONE: + break; + } + + shift -= 2; + if (shift == 0) { + if (!self_id_more_packets(q)) + return sid + 1; + + shift = 16; + sid++; + q = *sid; + + /* Check that the extra packets actually are + * extended self ID packets and that the + * sequence numbers in the extended self ID + * packets increase as expected. */ + + if (!self_id_extended(q) || + seq != self_id_ext_sequence(q)) + return NULL; + + seq++; + } + } +} + +static int get_port_type(u32 *sid, int port_index) +{ + int index, shift; + + index = (port_index + 5) / 8; + shift = 16 - ((port_index + 5) & 7) * 2; + return (sid[index] >> shift) & 0x03; +} + +struct fw_node *fw_node_create(u32 sid, int port_count, int color) +{ + struct fw_node *node; + + node = kzalloc(sizeof *node + port_count * sizeof node->ports[0], + GFP_ATOMIC); + if (node == NULL) + return NULL; + + node->color = color; + node->node_id = self_id_phy_id(sid); + node->link_on = self_id_link_on(sid); + node->phy_speed = self_id_phy_speed(sid); + node->port_count = port_count; + + atomic_set(&node->ref_count, 1); + INIT_LIST_HEAD(&node->link); + + return node; +} + +/** + * build_tree - Build the tree representation of the topology + * @self_ids: array of self IDs to create the tree from + * @self_id_count: the length of the self_ids array + * @local_id: the node ID of the local node + * + * This function builds the tree representation of the topology given + * by the self IDs from the latest bus reset. During the construction + * of the tree, the function checks that the self IDs are valid and + * internally consistent. On succcess this funtions returns the + * fw_node corresponding to the local card otherwise NULL. 
+ */ +static struct fw_node *build_tree(struct fw_card *card) +{ + struct fw_node *node, *child, *local_node; + struct list_head stack, *h; + u32 *sid, *next_sid, *end, q; + int i, port_count, child_port_count, phy_id, parent_count, stack_depth; + + local_node = NULL; + node = NULL; + INIT_LIST_HEAD(&stack); + stack_depth = 0; + sid = card->self_ids; + end = sid + card->self_id_count; + phy_id = 0; + card->irm_node = NULL; + + while (sid < end) { + next_sid = count_ports(sid, &port_count, &child_port_count); + + if (next_sid == NULL) { + fw_error("Inconsistent extended self IDs.\n"); + return NULL; + } + + q = *sid; + if (phy_id != self_id_phy_id(q)) { + fw_error("PHY ID mismatch in self ID: %d != %d.\n", + phy_id, self_id_phy_id(q)); + return NULL; + } + + if (child_port_count > stack_depth) { + fw_error("Topology stack underflow\n"); + return NULL; + } + + /* Seek back from the top of our stack to find the + * start of the child nodes for this node. */ + for (i = 0, h = &stack; i < child_port_count; i++) + h = h->prev; + child = fw_node(h); + + node = fw_node_create(q, port_count, card->color); + if (node == NULL) { + fw_error("Out of memory while building topology."); + return NULL; + } + + if (phy_id == (card->node_id & 0x3f)) + local_node = node; + + if (self_id_contender(q)) + card->irm_node = node; + + parent_count = 0; + + for (i = 0; i < port_count; i++) { + switch (get_port_type(sid, i)) { + case SELFID_PORT_PARENT: + /* Who's your daddy? We dont know the + * parent node at this time, so we + * temporarily abuse node->color for + * remembering the entry in the + * node->ports array where the parent + * node should be. Later, when we + * handle the parent node, we fix up + * the reference. + */ + parent_count++; + node->color = i; + break; + + case SELFID_PORT_CHILD: + node->ports[i].node = child; + /* Fix up parent reference for this + * child node. */ + child->ports[child->color].node = node; + child->color = card->color; + child = fw_node(child->link.next); + break; + } + } + + /* Check that the node reports exactly one parent + * port, except for the root, which of course should + * have no parents. */ + if ((next_sid == end && parent_count != 0) || + (next_sid < end && parent_count != 1)) { + fw_error("Parent port inconsistency for node %d: " + "parent_count=%d\n", phy_id, parent_count); + return NULL; + } + + /* Pop the child nodes off the stack and push the new node. 
*/ + __list_del(h->prev, &stack); + list_add_tail(&node->link, &stack); + stack_depth += 1 - child_port_count; + + sid = next_sid; + phy_id++; + } + + card->root_node = node; + + return local_node; +} + +typedef void (*fw_node_callback_t) (struct fw_card * card, + struct fw_node * node, + struct fw_node * parent); + +static void +for_each_fw_node(struct fw_card *card, struct fw_node *root, + fw_node_callback_t callback) +{ + struct list_head list; + struct fw_node *node, *next, *child, *parent; + int i; + + INIT_LIST_HEAD(&list); + + fw_node_get(root); + list_add_tail(&root->link, &list); + parent = NULL; + list_for_each_entry(node, &list, link) { + node->color = card->color; + + for (i = 0; i < node->port_count; i++) { + child = node->ports[i].node; + if (!child) + continue; + if (child->color == card->color) + parent = child; + else { + fw_node_get(child); + list_add_tail(&child->link, &list); + } + } + + callback(card, node, parent); + } + + list_for_each_entry_safe(node, next, &list, link) + fw_node_put(node); +} + +static void +report_lost_node(struct fw_card *card, + struct fw_node *node, struct fw_node *parent) +{ + fw_node_event(card, node, FW_NODE_DESTROYED); + fw_node_put(node); +} + +static void +report_found_node(struct fw_card *card, + struct fw_node *node, struct fw_node *parent) +{ + int b_path = (node->phy_speed == SCODE_BETA); + + if (parent != NULL) { + node->max_speed = min(parent->max_speed, node->phy_speed); + node->b_path = parent->b_path && b_path; + } else { + node->max_speed = node->phy_speed; + node->b_path = b_path; + } + + fw_node_event(card, node, FW_NODE_CREATED); +} + +void fw_destroy_nodes(struct fw_card *card) +{ + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + card->color++; + if (card->local_node != NULL) + for_each_fw_node(card, card->local_node, report_lost_node); + spin_unlock_irqrestore(&card->lock, flags); +} + +static void move_tree(struct fw_node *node0, struct fw_node *node1, int port) +{ + struct fw_node *tree; + int i; + + tree = node1->ports[port].node; + node0->ports[port].node = tree; + for (i = 0; i < tree->port_count; i++) { + if (tree->ports[i].node == node1) { + tree->ports[i].node = node0; + break; + } + } +} + +/** + * update_tree - compare the old topology tree for card with the new + * one specified by root. Queue the nodes and mark them as either + * found, lost or updated. Update the nodes in the card topology tree + * as we go. 
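+ *
+ * Roughly speaking, the walk keeps two lists in lock step: list0 is
+ * seeded with card->local_node from the existing tree and list1 with
+ * root from the tree just built out of the new self IDs.  A port that
+ * leads to a node already carrying the current card->color has been
+ * visited before and is skipped, so every node is queued at most once.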
+ */ +static void +update_tree(struct fw_card *card, struct fw_node *root, int *changed) +{ + struct list_head list0, list1; + struct fw_node *node0, *node1; + int i, event; + + INIT_LIST_HEAD(&list0); + list_add_tail(&card->local_node->link, &list0); + INIT_LIST_HEAD(&list1); + list_add_tail(&root->link, &list1); + + node0 = fw_node(list0.next); + node1 = fw_node(list1.next); + *changed = 0; + + while (&node0->link != &list0) { + + /* assert(node0->port_count == node1->port_count); */ + if (node0->link_on && !node1->link_on) + event = FW_NODE_LINK_OFF; + else if (!node0->link_on && node1->link_on) + event = FW_NODE_LINK_ON; + else + event = FW_NODE_UPDATED; + + node0->node_id = node1->node_id; + node0->color = card->color; + node0->link_on = node1->link_on; + node0->initiated_reset = node1->initiated_reset; + node1->color = card->color; + fw_node_event(card, node0, event); + + if (card->root_node == node1) + card->root_node = node0; + if (card->irm_node == node1) + card->irm_node = node0; + + for (i = 0; i < node0->port_count; i++) { + if (node0->ports[i].node && node1->ports[i].node) { + /* This port didn't change, queue the + * connected node for further + * investigation. */ + if (node0->ports[i].node->color == card->color) + continue; + list_add_tail(&node0->ports[i].node->link, + &list0); + list_add_tail(&node1->ports[i].node->link, + &list1); + } else if (node0->ports[i].node) { + /* The nodes connected here were + * unplugged; unref the lost nodes and + * queue FW_NODE_LOST callbacks for + * them. */ + + for_each_fw_node(card, node0->ports[i].node, + report_lost_node); + node0->ports[i].node = NULL; + *changed = 1; + } else if (node1->ports[i].node) { + /* One or more node were connected to + * this port. Move the new nodes into + * the tree and queue FW_NODE_CREATED + * callbacks for them. */ + move_tree(node0, node1, i); + for_each_fw_node(card, node0->ports[i].node, + report_found_node); + *changed = 1; + } + } + + node0 = fw_node(node0->link.next); + node1 = fw_node(node1->link.next); + } +} + +void +fw_core_handle_bus_reset(struct fw_card *card, + int node_id, int generation, + int self_id_count, u32 * self_ids) +{ + struct fw_node *local_node; + unsigned long flags; + int changed; + + fw_flush_transactions(card); + + spin_lock_irqsave(&card->lock, flags); + + card->node_id = node_id; + card->self_id_count = self_id_count; + card->generation = generation; + memcpy(card->self_ids, self_ids, self_id_count * 4); + + local_node = build_tree(card); + + card->color++; + + if (local_node == NULL) { + fw_error("topology build failed\n"); + /* FIXME: We need to issue a bus reset in this case. 
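+ * One conceivable follow-up, sketched here only, would be to schedule
+ * something like fw_core_initiate_bus_reset(card, 1) and try again
+ * with fresh self IDs; no such retry is attempted yet.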
*/ + } else if (card->local_node == NULL) { + card->local_node = local_node; + for_each_fw_node(card, local_node, report_found_node); + } else { + update_tree(card, local_node, &changed); + } + + spin_unlock_irqrestore(&card->lock, flags); +} + +EXPORT_SYMBOL(fw_core_handle_bus_reset); + +void fw_node_event(struct fw_card *card, struct fw_node *node, int event) +{ +} diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h new file mode 100644 index 0000000..7582d6e --- /dev/null +++ b/drivers/firewire/fw-topology.h @@ -0,0 +1,84 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-topology.h -- Incremental bus scan, based on bus topology + * + * Copyright (C) 2003-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __fw_topology_h +#define __fw_topology_h + +enum { + FW_NODE_CREATED = 0x00, + FW_NODE_UPDATED = 0x01, + FW_NODE_DESTROYED = 0x02, + FW_NODE_LINK_ON = 0x03, + FW_NODE_LINK_OFF = 0x04 +}; + +struct fw_port { + struct fw_node *node; + unsigned speed : 3; /* S100, S200, ... S3200 */ +}; + +struct fw_node { + u16 node_id; + u8 color; + u8 port_count; + unsigned link_on : 1; + unsigned initiated_reset : 1; + unsigned b_path : 1; + u8 phy_speed; /* As in the self ID packet. */ + u8 max_speed; /* Minimum of all phy-speeds and port speeds on + * the path from the local node to this node. */ + + atomic_t ref_count; + + /* For serializing node topology into a list. */ + struct list_head link; + + /* Upper layer specific data. */ + void *data; + + struct fw_port ports[0]; +}; + +extern inline struct fw_node * +fw_node(struct list_head *l) +{ + return list_entry (l, struct fw_node, link); +} + +extern inline struct fw_node * +fw_node_get(struct fw_node *node) +{ + atomic_inc(&node->ref_count); + + return node; +} + +extern inline void +fw_node_put(struct fw_node *node) +{ + if (atomic_dec_and_test(&node->ref_count)) + kfree(node); +} + +void +fw_destroy_nodes(struct fw_card *card); + +#endif diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c new file mode 100644 index 0000000..c3acf74 --- /dev/null +++ b/drivers/firewire/fw-transaction.c @@ -0,0 +1,730 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-transaction.c - core IEEE1394 transaction logic + * + * Copyright (C) 2004-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fw-transaction.h" +#include "fw-topology.h" + +#define header_pri(pri) ((pri) << 0) +#define header_tcode(tcode) ((tcode) << 4) +#define header_retry(retry) ((retry) << 8) +#define header_tlabel(tlabel) ((tlabel) << 10) +#define header_destination(destination) ((destination) << 16) +#define header_source(source) ((source) << 16) +#define header_rcode(rcode) ((rcode) << 12) +#define header_offset_high(offset_high) ((offset_high) << 0) +#define header_data_length(length) ((length) << 16) +#define header_extended_tcode(tcode) ((tcode) << 0) + +#define header_get_tcode(q) (((q) >> 4) & 0x0f) +#define header_get_tlabel(q) (((q) >> 10) & 0x3f) +#define header_get_rcode(q) (((q) >> 4) & 0x0f) +#define header_get_destination(q) (((q) >> 16) & 0xffff) +#define header_get_source(q) (((q) >> 16) & 0xffff) +#define header_get_offset_high(q) (((q) >> 0) & 0xffff) +#define header_get_data_length(q) (((q) >> 16) & 0xffff) +#define header_get_extended_tcode(q) (((q) >> 0) & 0xffff) + +#define phy_config_gap_count(gap_count) (((gap_count) << 16) | (1 << 22)) +#define phy_config_root_id(node_id) (((node_id) << 24) | (1 << 23)) +#define phy_identifier(id) ((id) << 30) + +static void +close_transaction(struct fw_transaction *t, struct fw_card *card, int rcode, + u32 * payload, size_t length) +{ + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + card->tlabel_mask &= ~(1 << t->tlabel); + list_del(&t->link); + spin_unlock_irqrestore(&card->lock, flags); + + t->callback(card, rcode, payload, length, t->callback_data); +} + +static void +transmit_complete_callback(struct fw_packet *packet, + struct fw_card *card, int status) +{ + struct fw_transaction *t = + container_of(packet, struct fw_transaction, packet); + + switch (status) { + case ACK_COMPLETE: + close_transaction(t, card, RCODE_COMPLETE, NULL, 0); + break; + case ACK_PENDING: + t->timestamp = packet->timestamp; + break; + case ACK_BUSY_X: + case ACK_BUSY_A: + case ACK_BUSY_B: + close_transaction(t, card, RCODE_BUSY, NULL, 0); + break; + case ACK_DATA_ERROR: + case ACK_TYPE_ERROR: + close_transaction(t, card, RCODE_SEND_ERROR, NULL, 0); + break; + default: + /* FIXME: In this case, status is a negative errno, + * corresponding to an OHCI specific transmit error + * code. We should map that to an RCODE instead of + * just the generic RCODE_SEND_ERROR. 
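+ * Purely as an illustration of what such a mapping could look like
+ * (nothing below is implemented), -ESTALE might become RCODE_CANCELLED
+ * while the remaining errnos stay RCODE_SEND_ERROR:
+ *
+ *   int rcode = (status == -ESTALE) ? RCODE_CANCELLED : RCODE_SEND_ERROR;
+ *   close_transaction(t, card, rcode, NULL, 0);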
*/ + close_transaction(t, card, RCODE_SEND_ERROR, NULL, 0); + break; + } +} + +void +fw_fill_packet(struct fw_packet *packet, int tcode, int tlabel, + int node_id, int generation, int speed, + unsigned long long offset, void *payload, size_t length) +{ + int ext_tcode; + + if (tcode > 0x10) { + ext_tcode = tcode - 0x10; + tcode = TCODE_LOCK_REQUEST; + } else + ext_tcode = 0; + + packet->header[0] = + header_retry(RETRY_X) | + header_tlabel(tlabel) | + header_tcode(tcode) | + header_destination(node_id | LOCAL_BUS); + packet->header[1] = + header_offset_high(offset >> 32) | header_source(0); + packet->header[2] = + offset; + + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + packet->header[3] = *(u32 *)payload; + packet->header_length = 16; + packet->payload_length = 0; + break; + + case TCODE_LOCK_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + packet->header[3] = + header_data_length(length) | + header_extended_tcode(ext_tcode); + packet->header_length = 16; + packet->payload = payload; + packet->payload_length = length; + break; + + case TCODE_READ_QUADLET_REQUEST: + packet->header_length = 12; + packet->payload_length = 0; + break; + + case TCODE_READ_BLOCK_REQUEST: + packet->header[3] = + header_data_length(length) | + header_extended_tcode(ext_tcode); + packet->header_length = 16; + packet->payload_length = 0; + break; + } + + packet->speed = speed; + packet->generation = generation; +} + +/** + * This function provides low-level access to the IEEE1394 transaction + * logic. Most C programs would use either fw_read(), fw_write() or + * fw_lock() instead - those function are convenience wrappers for + * this function. The fw_send_request() function is primarily + * provided as a flexible, one-stop entry point for languages bindings + * and protocol bindings. + * + * FIXME: Document this function further, in particular the possible + * values for rcode in the callback. In short, we map ACK_COMPLETE to + * RCODE_COMPLETE, internal errors set errno and set rcode to + * RCODE_SEND_ERROR (which is out of range for standard ieee1394 + * rcodes). All other rcodes are forwarded unchanged. For all + * errors, payload is NULL, length is 0. + * + * Can not expect the callback to be called before the function + * returns, though this does happen in some cases (ACK_COMPLETE and + * errors). + * + * The payload is only used for write requests and must not be freed + * until the callback has been called. + * + * @param card the card from which to send the request + * @param tcode the tcode for this transaction. Do not use + * TCODE_LOCK_REQUEST directly, insted use TCODE_LOCK_MASK_SWAP + * etc. to specify tcode and ext_tcode. 
+ * @param node_id the node_id of the destination node + * @param generation the generation for which node_id is valid + * @param speed the speed to use for sending the request + * @param offset the 48 bit offset on the destination node + * @param payload the data payload for the request subaction + * @param length the length in bytes of the data to read + * @param callback function to be called when the transaction is completed + * @param callback_data pointer to arbitrary data, which will be + * passed to the callback + */ +void +fw_send_request(struct fw_card *card, struct fw_transaction *t, + int tcode, int node_id, int generation, int speed, + unsigned long long offset, + void *payload, size_t length, + fw_transaction_callback_t callback, void *callback_data) +{ + unsigned long flags; + int tlabel; + + /* Bump the flush timer up 100ms first of all so we + * don't race with a flush timer callback. */ + + mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10)); + + /* Allocate tlabel from the bitmap and put the transaction on + * the list while holding the card spinlock. */ + + spin_lock_irqsave(&card->lock, flags); + + tlabel = card->current_tlabel; + if (card->tlabel_mask & (1 << tlabel)) { + spin_unlock_irqrestore(&card->lock, flags); + callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data); + return; + } + + card->current_tlabel = (card->current_tlabel + 1) & 0x1f; + card->tlabel_mask |= (1 << tlabel); + + list_add_tail(&t->link, &card->transaction_list); + + spin_unlock_irqrestore(&card->lock, flags); + + /* Initialize rest of transaction, fill out packet and send it. */ + t->node_id = node_id; + t->tlabel = tlabel; + t->callback = callback; + t->callback_data = callback_data; + + fw_fill_packet(&t->packet, tcode, t->tlabel, + node_id, generation, speed, offset, payload, length); + t->packet.callback = transmit_complete_callback; + + card->driver->send_request(card, &t->packet); +} +EXPORT_SYMBOL(fw_send_request); + +static void +transmit_phy_packet_callback(struct fw_packet *packet, + struct fw_card *card, int status) +{ + kfree(packet); +} + +static void send_phy_packet(struct fw_card *card, u32 data, int generation) +{ + struct fw_packet *packet; + + packet = kzalloc(sizeof *packet, GFP_ATOMIC); + if (packet == NULL) + return; + + packet->header[0] = data; + packet->header[1] = ~data; + packet->header_length = 8; + packet->payload_length = 0; + packet->speed = SCODE_100; + packet->generation = generation; + packet->callback = transmit_phy_packet_callback; + + card->driver->send_request(card, packet); +} + +void fw_send_force_root(struct fw_card *card, int node_id, int generation) +{ + u32 q; + + q = phy_identifier(PHY_PACKET_CONFIG) | phy_config_root_id(node_id); + send_phy_packet(card, q, generation); +} + +void fw_flush_transactions(struct fw_card *card) +{ + struct fw_transaction *t, *next; + struct list_head list; + unsigned long flags; + + INIT_LIST_HEAD(&list); + spin_lock_irqsave(&card->lock, flags); + list_splice_init(&card->transaction_list, &list); + card->tlabel_mask = 0; + spin_unlock_irqrestore(&card->lock, flags); + + list_for_each_entry_safe(t, next, &list, link) + t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); +} + +static struct fw_address_handler * +lookup_overlapping_address_handler(struct list_head *list, + unsigned long long offset, size_t length) +{ + struct fw_address_handler *handler; + + list_for_each_entry(handler, list, link) { + if (handler->offset < offset + length && + offset < handler->offset + handler->length) + return 
handler; + } + + return NULL; +} + +static struct fw_address_handler * +lookup_enclosing_address_handler(struct list_head *list, + unsigned long long offset, size_t length) +{ + struct fw_address_handler *handler; + + list_for_each_entry(handler, list, link) { + if (handler->offset <= offset && + offset + length <= handler->offset + handler->length) + return handler; + } + + return NULL; +} + +static DEFINE_SPINLOCK(address_handler_lock); +static LIST_HEAD(address_handler_list); + +struct fw_address_region fw_low_memory_region = + { 0x000000000000ull, 0x000100000000ull }; +struct fw_address_region fw_high_memory_region = + { 0x000100000000ull, 0xffffe0000000ull }; +struct fw_address_region fw_private_region = + { 0xffffe0000000ull, 0xfffff0000000ull }; +struct fw_address_region fw_csr_region = + { 0xfffff0000000ULL, 0xfffff0000800ull }; +struct fw_address_region fw_unit_space_region = + { 0xfffff0000900ull, 0x1000000000000ull }; + +EXPORT_SYMBOL(fw_low_memory_region); +EXPORT_SYMBOL(fw_high_memory_region); +EXPORT_SYMBOL(fw_private_region); +EXPORT_SYMBOL(fw_csr_region); +EXPORT_SYMBOL(fw_unit_space_region); + +/** + * Allocate a range of addresses in the node space of the OHCI + * controller. When a request is received that falls within the + * specified address range, the specified callback is invoked. The + * parameters passed to the callback give the details of the + * particular request + */ + +int +fw_core_add_address_handler(struct fw_address_handler *handler, + struct fw_address_region *region) +{ + struct fw_address_handler *other; + unsigned long flags; + int ret = -EBUSY; + + spin_lock_irqsave(&address_handler_lock, flags); + + handler->offset = region->start; + while (handler->offset + handler->length <= region->end) { + other = + lookup_overlapping_address_handler(&address_handler_list, + handler->offset, + handler->length); + if (other != NULL) { + handler->offset += other->length; + } else { + list_add_tail(&handler->link, &address_handler_list); + ret = 0; + break; + } + } + + spin_unlock_irqrestore(&address_handler_lock, flags); + + return ret; +} + +EXPORT_SYMBOL(fw_core_add_address_handler); + +/** + * Deallocate a range of addresses allocated with fw_allocate. This + * will call the associated callback one last time with a the special + * tcode TCODE_DEALLOCATE, to let the client destroy the registered + * callback data. For convenience, the callback parameters offset and + * length are set to the start and the length respectively for the + * deallocated region, payload is set to NULL. 
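+ *
+ * For reference, a caller pairs this with fw_core_add_address_handler();
+ * a minimal sketch, in which the handler variable and the callback name
+ * are made up for the example:
+ *
+ *   static struct fw_address_handler my_handler = {
+ *           .length           = 0x100,
+ *           .address_callback = my_address_callback,
+ *   };
+ *
+ *   if (fw_core_add_address_handler(&my_handler,
+ *                                   &fw_high_memory_region) < 0)
+ *           return -EBUSY;
+ *   ...
+ *   fw_core_remove_address_handler(&my_handler);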
+ */ + +void fw_core_remove_address_handler(struct fw_address_handler *handler) +{ + unsigned long flags; + + spin_lock_irqsave(&address_handler_lock, flags); + list_del(&handler->link); + spin_unlock_irqrestore(&address_handler_lock, flags); +} + +EXPORT_SYMBOL(fw_core_remove_address_handler); + +struct fw_request { + struct fw_packet response; + int ack; + u32 length; + u32 data[0]; +}; + +static void +free_response_callback(struct fw_packet *packet, + struct fw_card *card, int status) +{ + struct fw_request *request; + + request = container_of(packet, struct fw_request, response); + kfree(request); +} + +static void +fw_fill_response(struct fw_packet *response, + u32 *request, u32 *data, size_t length) +{ + int tcode, tlabel, extended_tcode, source, destination; + + tcode = header_get_tcode(request[0]); + tlabel = header_get_tlabel(request[0]); + source = header_get_destination(request[0]); + destination = header_get_source(request[1]); + extended_tcode = header_get_extended_tcode(request[3]); + + response->header[0] = + header_retry(RETRY_1) | + header_tlabel(tlabel) | + header_destination(destination); + response->header[1] = header_source(source); + response->header[2] = 0; + + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + response->header[0] |= header_tcode(TCODE_WRITE_RESPONSE); + response->header_length = 12; + response->payload_length = 0; + break; + + case TCODE_READ_QUADLET_REQUEST: + response->header[0] |= + header_tcode(TCODE_READ_QUADLET_RESPONSE); + response->header[3] = 0; + response->header_length = 16; + response->payload_length = 0; + break; + + case TCODE_READ_BLOCK_REQUEST: + case TCODE_LOCK_REQUEST: + response->header[0] |= header_tcode(tcode + 2); + response->header[3] = + header_data_length(length) | + header_extended_tcode(extended_tcode); + response->header_length = 16; + response->payload = data; + response->payload_length = length; + break; + + default: + BUG(); + return; + } +} + +static struct fw_request * +allocate_request(u32 *header, int ack, + int speed, int timestamp, int generation) +{ + struct fw_request *request; + u32 *data, length; + int request_tcode; + + request_tcode = header_get_tcode(header[0]); + switch (request_tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + data = &header[3]; + length = 4; + break; + + case TCODE_WRITE_BLOCK_REQUEST: + case TCODE_LOCK_REQUEST: + data = &header[4]; + length = header_get_data_length(header[3]); + break; + + case TCODE_READ_QUADLET_REQUEST: + data = NULL; + length = 4; + break; + + case TCODE_READ_BLOCK_REQUEST: + data = NULL; + length = header_get_data_length(header[3]); + break; + + default: + BUG(); + return NULL; + } + + request = kmalloc(sizeof *request + length, GFP_ATOMIC); + if (request == NULL) + return NULL; + + request->response.speed = speed; + request->response.timestamp = timestamp; + request->response.generation = generation; + request->response.callback = free_response_callback; + request->ack = ack; + request->length = length; + if (data) + memcpy(request->data, data, length); + + fw_fill_response(&request->response, header, request->data, length); + + return request; +} + +void +fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) +{ + int response_tcode; + + /* Broadcast packets are reported as ACK_COMPLETE, so this + * check is sufficient to ensure we don't send response to + * broadcast packets or posted writes. 
*/ + if (request->ack != ACK_PENDING) + return; + + request->response.header[1] |= header_rcode(rcode); + response_tcode = header_get_tcode(request->response.header[0]); + if (rcode != RCODE_COMPLETE) + /* Clear the data_length field. */ + request->response.header[3] &= 0xffff; + else if (response_tcode == TCODE_READ_QUADLET_RESPONSE) + request->response.header[3] = request->data[0]; + + card->driver->send_response(card, &request->response); +} + +EXPORT_SYMBOL(fw_send_response); + +void +fw_core_handle_request(struct fw_card *card, + int speed, int ack, int timestamp, + int generation, u32 length, u32 *header) +{ + struct fw_address_handler *handler; + struct fw_request *request; + unsigned long long offset; + unsigned long flags; + int tcode, destination, source, t; + + if (length > 2048) { + /* FIXME: send error response. */ + return; + } + + if (ack != ACK_PENDING && ack != ACK_COMPLETE) + return; + + t = (timestamp & 0x1fff) + 4000; + if (t >= 8000) + t = (timestamp & ~0x1fff) + 0x2000 + t - 8000; + else + t = (timestamp & ~0x1fff) + t; + + request = allocate_request(header, ack, speed, t, generation); + if (request == NULL) { + /* FIXME: send statically allocated busy packet. */ + return; + } + + offset = + ((unsigned long long) + header_get_offset_high(header[1]) << 32) | header[2]; + tcode = header_get_tcode(header[0]); + destination = header_get_destination(header[0]); + source = header_get_source(header[0]); + + spin_lock_irqsave(&address_handler_lock, flags); + handler = lookup_enclosing_address_handler(&address_handler_list, + offset, request->length); + spin_unlock_irqrestore(&address_handler_lock, flags); + + /* FIXME: lookup the fw_node corresponding to the sender of + * this request and pass that to the address handler instead + * of the node ID. We may also want to move the address + * allocations to fw_node so we only do this callback if the + * upper layers registered it for this node. */ + + if (handler == NULL) + fw_send_response(card, request, RCODE_ADDRESS_ERROR); + else + handler->address_callback(card, request, + tcode, destination, source, + generation, speed, offset, + request->data, request->length, + handler->callback_data); +} + +EXPORT_SYMBOL(fw_core_handle_request); + +void +fw_core_handle_response(struct fw_card *card, + int speed, int ack, int timestamp, + u32 length, u32 *header) +{ + struct fw_transaction *t; + unsigned long flags; + u32 *data; + size_t data_length; + int tcode, tlabel, destination, source, rcode; + + tcode = header_get_tcode(header[0]); + tlabel = header_get_tlabel(header[0]); + destination = header_get_destination(header[0]); + source = header_get_source(header[1]); + rcode = header_get_rcode(header[1]); + + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry(t, &card->transaction_list, link) { + if (t->node_id == source && t->tlabel == tlabel) { + list_del(&t->link); + card->tlabel_mask &= ~(1 << t->tlabel); + break; + } + } + spin_unlock_irqrestore(&card->lock, flags); + + if (&t->link == &card->transaction_list) { + fw_notify("Unsolicited response\n"); + return; + } + + /* FIXME: sanity check packet, is length correct, does tcodes + * and addresses match. 
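+ * For instance, a packet whose tcode fails TCODE_IS_RESPONSE() or whose
+ * header_get_destination(header[0]) does not match card->node_id should
+ * arguably be dropped before its payload is trusted; none of that is
+ * checked yet.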
*/ + + switch (tcode) { + case TCODE_READ_QUADLET_RESPONSE: + data = (u32 *) &header[3]; + data_length = 4; + break; + + case TCODE_WRITE_RESPONSE: + data = NULL; + data_length = 0; + break; + + case TCODE_READ_BLOCK_RESPONSE: + case TCODE_LOCK_RESPONSE: + data = &header[4]; + data_length = header_get_data_length(header[3]); + break; + + default: + /* Should never happen, this is just to shut up gcc. */ + data = NULL; + data_length = 0; + break; + } + + t->callback(card, rcode, data, data_length, t->callback_data); +} + +EXPORT_SYMBOL(fw_core_handle_response); + +MODULE_AUTHOR("Kristian Hoegsberg "); +MODULE_DESCRIPTION("Core IEEE1394 transaction logic"); +MODULE_LICENSE("GPL"); + +static u32 vendor_textual_descriptor_data[] = { + /* textual descriptor leaf () */ + 0x00080000, + 0x00000000, + 0x00000000, + 0x4c696e75, /* L i n u */ + 0x78204669, /* x F i */ + 0x72657769, /* r e w i */ + 0x72652028, /* r e ( */ + 0x4a554a55, /* J U J U */ + 0x29000000, /* ) */ +}; + +static struct fw_descriptor vendor_textual_descriptor = { + .length = ARRAY_SIZE(vendor_textual_descriptor_data), + .key = 0x81000000, + .data = vendor_textual_descriptor_data +}; + +struct bus_type fw_bus_type = { + .name = "fw", +}; + +static int __init fw_core_init(void) +{ + int retval; + + retval = bus_register(&fw_bus_type); + if (retval < 0) + return retval; + + /* Add the vendor textual descriptor. */ + retval = fw_core_add_descriptor(&vendor_textual_descriptor); + BUG_ON(retval < 0); + + return 0; +} + +static void __exit fw_core_cleanup(void) +{ + bus_unregister(&fw_bus_type); +} + +module_init(fw_core_init); +module_exit(fw_core_cleanup); diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h new file mode 100644 index 0000000..149ef16 --- /dev/null +++ b/drivers/firewire/fw-transaction.h @@ -0,0 +1,422 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-transaction.h - Header for IEEE1394 transaction logic + * + * Copyright (C) 2003-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __fw_core_h +#define __fw_core_h + +#include +#include +#include +#include +#include + +#define TCODE_WRITE_QUADLET_REQUEST 0 +#define TCODE_WRITE_BLOCK_REQUEST 1 +#define TCODE_WRITE_RESPONSE 2 +#define TCODE_READ_QUADLET_REQUEST 4 +#define TCODE_READ_BLOCK_REQUEST 5 +#define TCODE_READ_QUADLET_RESPONSE 6 +#define TCODE_READ_BLOCK_RESPONSE 7 +#define TCODE_CYCLE_START 8 +#define TCODE_LOCK_REQUEST 9 +#define TCODE_STREAM_DATA 10 +#define TCODE_LOCK_RESPONSE 11 + +#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) +#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) +#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) +#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) +#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0) + +/* Juju specific tcodes */ +#define TCODE_DEALLOCATE 0x10 +#define TCODE_LOCK_MASK_SWAP 0x11 +#define TCODE_LOCK_COMPARE_SWAP 0x12 +#define TCODE_LOCK_FETCH_ADD 0x13 +#define TCODE_LOCK_LITTLE_ADD 0x14 +#define TCODE_LOCK_BOUNDED_ADD 0x15 +#define TCODE_LOCK_WRAP_ADD 0x16 +#define TCODE_LOCK_VENDOR_SPECIFIC 0x17 + +#define SCODE_100 0x0 +#define SCODE_200 0x1 +#define SCODE_400 0x2 +#define SCODE_BETA 0x3 + +#define EXTCODE_MASK_SWAP 0x1 +#define EXTCODE_COMPARE_SWAP 0x2 +#define EXTCODE_FETCH_ADD 0x3 +#define EXTCODE_LITTLE_ADD 0x4 +#define EXTCODE_BOUNDED_ADD 0x5 +#define EXTCODE_WRAP_ADD 0x6 + +#define ACK_COMPLETE 0x1 +#define ACK_PENDING 0x2 +#define ACK_BUSY_X 0x4 +#define ACK_BUSY_A 0x5 +#define ACK_BUSY_B 0x6 +#define ACK_DATA_ERROR 0xd +#define ACK_TYPE_ERROR 0xe + +#define RCODE_COMPLETE 0x0 +#define RCODE_CONFLICT_ERROR 0x4 +#define RCODE_DATA_ERROR 0x5 +#define RCODE_TYPE_ERROR 0x6 +#define RCODE_ADDRESS_ERROR 0x7 + +/* Juju specific rcodes */ +#define RCODE_SEND_ERROR 0x10 +#define RCODE_CANCELLED 0x11 +#define RCODE_BUSY 0x12 + +#define RETRY_1 0x00 +#define RETRY_X 0x01 +#define RETRY_A 0x02 +#define RETRY_B 0x03 + +#define LOCAL_BUS 0xffc0 + +#define SELFID_PORT_CHILD 0x3 +#define SELFID_PORT_PARENT 0x2 +#define SELFID_PORT_NCONN 0x1 +#define SELFID_PORT_NONE 0x0 + +#define PHY_PACKET_CONFIG 0x0 +#define PHY_PACKET_LINK_ON 0x1 +#define PHY_PACKET_SELF_ID 0x2 + +#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args) +#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) +#define fw_debug(s, args...) 
printk(KERN_DEBUG KBUILD_MODNAME ": " s, ## args) + +static inline void +fw_memcpy_from_be32(void *_dst, void *_src, size_t size) +{ + u32 *dst = _dst; + u32 *src = _src; + int i; + + for (i = 0; i < size / 4; i++) + dst[i] = cpu_to_be32(src[i]); +} + +static inline void +fw_memcpy_to_be32(void *_dst, void *_src, size_t size) +{ + fw_memcpy_from_be32(_dst, _src, size); +} + +struct fw_card; +struct fw_packet; +struct fw_node; +struct fw_request; + +struct fw_descriptor { + struct list_head link; + size_t length; + u32 key; + u32 *data; +}; + +int fw_core_add_descriptor (struct fw_descriptor *desc); +void fw_core_remove_descriptor (struct fw_descriptor *desc); + +typedef void (*fw_packet_callback_t) (struct fw_packet *packet, + struct fw_card *card, int status); + +typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, + void *data, + size_t length, + void *callback_data); + +typedef void (*fw_address_callback_t)(struct fw_card *card, + struct fw_request *request, + int tcode, int destination, int source, + int generation, int speed, + unsigned long long offset, + void *data, size_t length, + void *callback_data); + +typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle, + int node_id, int generation, + u32 *self_ids, + int self_id_count, + void *callback_data); + +struct fw_packet { + int speed; + int generation; + u32 header[4]; + size_t header_length; + void *payload; + size_t payload_length; + u32 timestamp; + + dma_addr_t payload_bus; + + /* This callback is called when the packet transmission has + * completed; for successful transmission, the status code is + * the ack received from the destination, otherwise it's a + * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO. + * The callback can be called from tasklet context and thus + * must never block. + */ + fw_packet_callback_t callback; + int status; + struct list_head link; +}; + +struct fw_transaction { + int node_id; /* The generation is implied; it is always the current. */ + int tlabel; + int timestamp; + struct list_head link; + + struct fw_packet packet; + + /* The data passed to the callback is valid only during the + * callback. */ + fw_transaction_callback_t callback; + void *callback_data; +}; + +extern inline struct fw_packet * +fw_packet(struct list_head *l) +{ + return list_entry (l, struct fw_packet, link); +} + +struct fw_address_handler { + u64 offset; + size_t length; + fw_address_callback_t address_callback; + void *callback_data; + struct list_head link; +}; + + +struct fw_address_region { + u64 start; + u64 end; +}; + +extern struct fw_address_region fw_low_memory_region; +extern struct fw_address_region fw_high_memory_region; +extern struct fw_address_region fw_private_region; +extern struct fw_address_region fw_csr_region; +extern struct fw_address_region fw_unit_space_region; + +int fw_core_add_address_handler(struct fw_address_handler *handler, + struct fw_address_region *region); +void fw_core_remove_address_handler(struct fw_address_handler *handler); +void fw_send_response(struct fw_card *card, + struct fw_request *request, int rcode); + +extern struct bus_type fw_bus_type; + +struct fw_card { + struct fw_card_driver *driver; + struct device *device; + + int node_id; + int generation; + /* This is the generation used for timestamping incoming requests. 
*/ + int request_generation; + int current_tlabel, tlabel_mask; + struct list_head transaction_list; + struct timer_list flush_timer; + + unsigned long long guid; + int max_receive; + int link_speed; + int config_rom_generation; + + /* We need to store up to 4 self ID for a maximum of 63 devices. */ + int self_id_count; + u32 self_ids[252]; + + spinlock_t lock; /* Take this lock when handling the lists in + * this struct. */ + struct fw_node *local_node; + struct fw_node *root_node; + struct fw_node *irm_node; + int color; + + int index; + + struct device card_device; + + struct list_head link; +}; + +struct fw_card *fw_card_get(struct fw_card *card); +void fw_card_put(struct fw_card *card); + +/* The iso packet format allows for an immediate header/payload part + * stored in 'header' immediately after the packet info plus an + * indirect payload part that is pointer to by the 'payload' field. + * Applications can use one or the other or both to implement simple + * low-bandwidth streaming (e.g. audio) or more advanced + * scatter-gather streaming (e.g. assembling video frame automatically). */ + +struct fw_iso_packet { + u16 payload_length; /* Length of indirect payload. */ + u32 interrupt : 1; /* Generate interrupt on this packet */ + u32 skip : 1; /* Set to not send packet at all. */ + u32 tag : 2; + u32 sy : 4; + u32 header_length : 8; /* Length of immediate header. */ + u32 header[0]; +}; + +#define FW_ISO_CONTEXT_TRANSMIT 0 +#define FW_ISO_CONTEXT_RECEIVE 1 + +struct fw_iso_context; + +typedef void (*fw_iso_callback_t) (struct fw_iso_context *context, + int status, u32 cycle, void *data); + +struct fw_iso_context { + struct fw_card *card; + int type; + int channel; + int speed; + fw_iso_callback_t callback; + void *callback_data; + + void *buffer; + size_t buffer_size; + dma_addr_t *pages; + int page_count; +}; + +struct fw_iso_context * +fw_iso_context_create(struct fw_card *card, int type, + size_t buffer_size, + fw_iso_callback_t callback, + void *callback_data); + +void +fw_iso_context_destroy(struct fw_iso_context *ctx); + +void +fw_iso_context_start(struct fw_iso_context *ctx, + int channel, int speed, int cycle); + +int +fw_iso_context_queue(struct fw_iso_context *ctx, + struct fw_iso_packet *packet, void *payload); + +int +fw_iso_context_send(struct fw_iso_context *ctx, + int channel, int speed, int cycle); + +struct fw_card_driver { + const char *name; + + /* Enable the given card with the given initial config rom. + * This function is expected to activate the card, and either + * enable the PHY or set the link_on bit and initiate a bus + * reset. */ + int (*enable) (struct fw_card *card, u32 *config_rom, size_t length); + + int (*update_phy_reg) (struct fw_card *card, int address, + int clear_bits, int set_bits); + + /* Update the config rom for an enabled card. This function + * should change the config rom that is presented on the bus + * an initiate a bus reset. */ + int (*set_config_rom) (struct fw_card *card, + u32 *config_rom, size_t length); + + void (*send_request) (struct fw_card *card, struct fw_packet *packet); + void (*send_response) (struct fw_card *card, struct fw_packet *packet); + + /* Allow the specified node ID to do direct DMA out and in of + * host memory. The card will disable this for all node when + * a bus reset happens, so driver need to reenable this after + * bus reset. Returns 0 on success, -ENODEV if the card + * doesn't support this, -ESTALE if the generation doesn't + * match. 
*/ + int (*enable_phys_dma) (struct fw_card *card, + int node_id, int generation); + + struct fw_iso_context * + (*allocate_iso_context)(struct fw_card *card, int type); + void (*free_iso_context)(struct fw_iso_context *ctx); + + int (*send_iso)(struct fw_iso_context *ctx, s32 cycle); + + int (*queue_iso)(struct fw_iso_context *ctx, + struct fw_iso_packet *packet, void *payload); +}; + +int +fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); + +void +fw_send_request(struct fw_card *card, struct fw_transaction *t, + int tcode, int node_id, int generation, int speed, + unsigned long long offset, + void *data, size_t length, + fw_transaction_callback_t callback, void *callback_data); + +void fw_flush_transactions(struct fw_card *card); + +void +fw_send_force_root(struct fw_card *card, int node_id, int generation); + +/* Called by the topology code to inform the device code of node + * activity; found, lost, or updated nodes */ +void +fw_node_event(struct fw_card *card, struct fw_node *node, int event); + +/* API used by card level drivers */ + +/* Do we need phy speed here also? If we add more args, maybe we + should go back to struct fw_card_info. */ +void +fw_card_initialize(struct fw_card *card, struct fw_card_driver *driver, + struct device *device); +int +fw_card_add(struct fw_card *card, + u32 max_receive, u32 link_speed, u64 guid); + +void +fw_core_remove_card(struct fw_card *card); + +void +fw_core_handle_bus_reset(struct fw_card *card, + int node_id, int generation, + int self_id_count, u32 *self_ids); +void +fw_core_handle_request(struct fw_card *card, + int speed, int ack, int timestamp, + int generation, + u32 length, u32 *payload); +void +fw_core_handle_response(struct fw_card *card, + int speed, int ack, int timestamp, + u32 length, u32 *payload); + + +#endif /* __fw_core_h */ -- cgit v0.10.2 From 19a15b937b26638933307bb02f7b1801310d6eb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 19 Dec 2006 19:58:31 -0500 Subject: firewire: Add device probing and sysfs integration. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Kristian Høgsberg Signed-off-by: Stefan Richter diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile index db7020d..da77bc0 100644 --- a/drivers/firewire/Makefile +++ b/drivers/firewire/Makefile @@ -2,6 +2,7 @@ # Makefile for the Linux IEEE 1394 implementation # -fw-core-objs := fw-card.o fw-topology.o fw-transaction.o fw-iso.o +fw-core-objs := fw-card.o fw-topology.o fw-transaction.o fw-iso.o \ + fw-device.o fw-device-cdev.o obj-$(CONFIG_FW) += fw-core.o diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index d8abd70..7977390 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c @@ -24,6 +24,7 @@ #include #include "fw-transaction.h" #include "fw-topology.h" +#include "fw-device.h" /* The lib/crc16.c implementation uses the standard (0x8005) * polynomial, but we need the ITU-T (or CCITT) polynomial (0x1021). @@ -186,6 +187,59 @@ fw_core_remove_descriptor (struct fw_descriptor *desc) EXPORT_SYMBOL(fw_core_remove_descriptor); static void +fw_card_irm_work(struct work_struct *work) +{ + struct fw_card *card = + container_of(work, struct fw_card, work.work); + struct fw_device *root; + unsigned long flags; + int new_irm_id, generation; + + /* FIXME: This simple bus management unconditionally picks a + * cycle master if the current root can't do it. We need to + * not do this if there is a bus manager already. 
Also, some + * hubs set the contender bit, which is bogus, so we should + * probably do a little sanity check on the IRM (like, read + * the bandwidth register) if it's not us. */ + + spin_lock_irqsave(&card->lock, flags); + + generation = card->generation; + root = card->root_node->data; + + if (root == NULL) + /* Either link_on is false, or we failed to read the + * config rom. In either case, pick another root. */ + new_irm_id = card->local_node->node_id; + else if (root->state != FW_DEVICE_RUNNING) + /* If we haven't probed this device yet, bail out now + * and let's try again once that's done. */ + new_irm_id = -1; + else if (root->config_rom[2] & bib_cmc) + /* FIXME: I suppose we should set the cmstr bit in the + * STATE_CLEAR register of this node, as described in + * 1394-1995, 8.4.2.6. Also, send out a force root + * packet for this node. */ + new_irm_id = -1; + else + /* Current root has an active link layer and we + * successfully read the config rom, but it's not + * cycle master capable. */ + new_irm_id = card->local_node->node_id; + + if (card->irm_retries++ > 5) + new_irm_id = -1; + + spin_unlock_irqrestore(&card->lock, flags); + + if (new_irm_id > 0) { + fw_notify("Trying to become root (card %d)\n", card->index); + fw_send_force_root(card, new_irm_id, generation); + fw_core_initiate_bus_reset(card, 1); + } +} + +static void release_card(struct device *device) { struct fw_card *card = @@ -222,6 +276,8 @@ fw_card_initialize(struct fw_card *card, struct fw_card_driver *driver, card->local_node = NULL; + INIT_DELAYED_WORK(&card->work, fw_card_irm_work); + card->card_device.bus = &fw_bus_type; card->card_device.release = release_card; card->card_device.parent = card->device; diff --git a/drivers/firewire/fw-device-cdev.c b/drivers/firewire/fw-device-cdev.c new file mode 100644 index 0000000..c10e332 --- /dev/null +++ b/drivers/firewire/fw-device-cdev.c @@ -0,0 +1,617 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-device-cdev.c - Char device for device raw access + * + * Copyright (C) 2005-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fw-transaction.h" +#include "fw-topology.h" +#include "fw-device.h" +#include "fw-device-cdev.h" + +/* + * todo + * + * - bus resets sends a new packet with new generation and node id + * + */ + +/* dequeue_event() just kfree()'s the event, so the event has to be + * the first field in the struct. 
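+ * In other words, the containers defined below (struct response, struct
+ * iso_interrupt, struct request_event) all embed a struct event as their
+ * first member, so the pointer queued by queue_event() and the pointer
+ * kfree()'d by dequeue_event() are one and the same allocation.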
*/ + +struct event { + struct { void *data; size_t size; } v[2]; + struct list_head link; +}; + +struct response { + struct event event; + struct fw_transaction transaction; + struct client *client; + struct fw_cdev_event_response response; +}; + +struct iso_interrupt { + struct event event; + struct fw_cdev_event_iso_interrupt interrupt; +}; + +struct client { + struct fw_device *device; + spinlock_t lock; + struct list_head handler_list; + struct list_head request_list; + u32 request_serial; + struct list_head event_list; + struct semaphore event_list_sem; + wait_queue_head_t wait; + unsigned long vm_start; + struct fw_iso_context *iso_context; +}; + +static inline void __user * +u64_to_uptr(__u64 value) +{ + return (void __user *)(unsigned long)value; +} + +static inline __u64 +uptr_to_u64(void __user *ptr) +{ + return (__u64)(unsigned long)ptr; +} + +static int fw_device_op_open(struct inode *inode, struct file *file) +{ + struct fw_device *device; + struct client *client; + + device = container_of(inode->i_cdev, struct fw_device, cdev); + + client = kzalloc(sizeof *client, GFP_KERNEL); + if (client == NULL) + return -ENOMEM; + + client->device = fw_device_get(device); + INIT_LIST_HEAD(&client->event_list); + sema_init(&client->event_list_sem, 0); + INIT_LIST_HEAD(&client->handler_list); + INIT_LIST_HEAD(&client->request_list); + spin_lock_init(&client->lock); + init_waitqueue_head(&client->wait); + + file->private_data = client; + + return 0; +} + +static void queue_event(struct client *client, struct event *event, + void *data0, size_t size0, void *data1, size_t size1) +{ + unsigned long flags; + + event->v[0].data = data0; + event->v[0].size = size0; + event->v[1].data = data1; + event->v[1].size = size1; + + spin_lock_irqsave(&client->lock, flags); + + list_add_tail(&event->link, &client->event_list); + + up(&client->event_list_sem); + wake_up_interruptible(&client->wait); + + spin_unlock_irqrestore(&client->lock, flags); +} + +static int dequeue_event(struct client *client, char __user *buffer, size_t count) +{ + unsigned long flags; + struct event *event; + size_t size, total; + int i, retval = -EFAULT; + + if (down_interruptible(&client->event_list_sem) < 0) + return -EINTR; + + spin_lock_irqsave(&client->lock, flags); + + event = container_of(client->event_list.next, struct event, link); + list_del(&event->link); + + spin_unlock_irqrestore(&client->lock, flags); + + if (buffer == NULL) + goto out; + + total = 0; + for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { + size = min(event->v[i].size, count - total); + if (copy_to_user(buffer + total, event->v[i].data, size)) + goto out; + total += size; + } + retval = total; + + out: + kfree(event); + + return retval; +} + +static ssize_t +fw_device_op_read(struct file *file, + char __user *buffer, size_t count, loff_t *offset) +{ + struct client *client = file->private_data; + + return dequeue_event(client, buffer, count); +} + +static int ioctl_config_rom(struct client *client, void __user *arg) +{ + struct fw_cdev_get_config_rom rom; + + rom.length = client->device->config_rom_length; + memcpy(rom.data, client->device->config_rom, rom.length * 4); + if (copy_to_user(arg, &rom, + (char *)&rom.data[rom.length] - (char *)&rom)) + return -EFAULT; + + return 0; +} + +static void +complete_transaction(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) +{ + struct response *response = data; + struct client *client = response->client; + + if (length < response->response.length) + 
response->response.length = length; + if (rcode == RCODE_COMPLETE) + memcpy(response->response.data, payload, + response->response.length); + + response->response.type = FW_CDEV_EVENT_RESPONSE; + response->response.rcode = rcode; + queue_event(client, &response->event, + &response->response, sizeof response->response, + response->response.data, response->response.length); +} + +static ssize_t ioctl_send_request(struct client *client, void __user *arg) +{ + struct fw_device *device = client->device; + struct fw_cdev_send_request request; + struct response *response; + + if (copy_from_user(&request, arg, sizeof request)) + return -EFAULT; + + /* What is the biggest size we'll accept, really? */ + if (request.length > 4096) + return -EINVAL; + + response = kmalloc(sizeof *response + request.length, GFP_KERNEL); + if (response == NULL) + return -ENOMEM; + + response->client = client; + response->response.length = request.length; + response->response.closure = request.closure; + + if (request.data && + copy_from_user(response->response.data, + u64_to_uptr(request.data), request.length)) { + kfree(response); + return -EFAULT; + } + + fw_send_request(device->card, &response->transaction, + request.tcode, + device->node->node_id | LOCAL_BUS, + device->card->generation, + device->node->max_speed, + request.offset, + response->response.data, request.length, + complete_transaction, response); + + if (request.data) + return sizeof request + request.length; + else + return sizeof request; +} + +struct address_handler { + struct fw_address_handler handler; + __u64 closure; + struct client *client; + struct list_head link; +}; + +struct request { + struct fw_request *request; + void *data; + size_t length; + u32 serial; + struct list_head link; +}; + +struct request_event { + struct event event; + struct fw_cdev_event_request request; +}; + +static void +handle_request(struct fw_card *card, struct fw_request *r, + int tcode, int destination, int source, + int generation, int speed, + unsigned long long offset, + void *payload, size_t length, void *callback_data) +{ + struct address_handler *handler = callback_data; + struct request *request; + struct request_event *e; + unsigned long flags; + struct client *client = handler->client; + + request = kmalloc(sizeof *request, GFP_ATOMIC); + e = kmalloc(sizeof *e, GFP_ATOMIC); + if (request == NULL || e == NULL) { + kfree(request); + kfree(e); + fw_send_response(card, r, RCODE_CONFLICT_ERROR); + return; + } + + request->request = r; + request->data = payload; + request->length = length; + + spin_lock_irqsave(&client->lock, flags); + request->serial = client->request_serial++; + list_add_tail(&request->link, &client->request_list); + spin_unlock_irqrestore(&client->lock, flags); + + e->request.type = FW_CDEV_EVENT_REQUEST; + e->request.tcode = tcode; + e->request.offset = offset; + e->request.length = length; + e->request.serial = request->serial; + e->request.closure = handler->closure; + + queue_event(client, &e->event, + &e->request, sizeof e->request, payload, length); +} + +static int ioctl_allocate(struct client *client, void __user *arg) +{ + struct fw_cdev_allocate request; + struct address_handler *handler; + unsigned long flags; + struct fw_address_region region; + + if (copy_from_user(&request, arg, sizeof request)) + return -EFAULT; + + handler = kmalloc(sizeof *handler, GFP_KERNEL); + if (handler == NULL) + return -ENOMEM; + + region.start = request.offset; + region.end = request.offset + request.length; + handler->handler.length = request.length; 
+ handler->handler.address_callback = handle_request; + handler->handler.callback_data = handler; + handler->closure = request.closure; + handler->client = client; + + if (fw_core_add_address_handler(&handler->handler, ®ion) < 0) { + kfree(handler); + return -EBUSY; + } + + spin_lock_irqsave(&client->lock, flags); + list_add_tail(&handler->link, &client->handler_list); + spin_unlock_irqrestore(&client->lock, flags); + + return 0; +} + +static int ioctl_send_response(struct client *client, void __user *arg) +{ + struct fw_cdev_send_response request; + struct request *r; + unsigned long flags; + + if (copy_from_user(&request, arg, sizeof request)) + return -EFAULT; + + spin_lock_irqsave(&client->lock, flags); + list_for_each_entry(r, &client->request_list, link) { + if (r->serial == request.serial) { + list_del(&r->link); + break; + } + } + spin_unlock_irqrestore(&client->lock, flags); + + if (&r->link == &client->request_list) + return -EINVAL; + + if (request.length < r->length) + r->length = request.length; + if (copy_from_user(r->data, u64_to_uptr(request.data), r->length)) + return -EFAULT; + + fw_send_response(client->device->card, r->request, request.rcode); + + kfree(r); + + return 0; +} + +static void +iso_callback(struct fw_iso_context *context, int status, u32 cycle, void *data) +{ + struct client *client = data; + struct iso_interrupt *interrupt; + + interrupt = kzalloc(sizeof *interrupt, GFP_ATOMIC); + if (interrupt == NULL) + return; + + interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; + interrupt->interrupt.closure = 0; + interrupt->interrupt.cycle = cycle; + queue_event(client, &interrupt->event, + &interrupt->interrupt, sizeof interrupt->interrupt, NULL, 0); +} + +static int ioctl_create_iso_context(struct client *client, void __user *arg) +{ + struct fw_cdev_create_iso_context request; + + if (copy_from_user(&request, arg, sizeof request)) + return -EFAULT; + + client->iso_context = fw_iso_context_create(client->device->card, + FW_ISO_CONTEXT_TRANSMIT, + request.buffer_size, + iso_callback, client); + if (IS_ERR(client->iso_context)) + return PTR_ERR(client->iso_context); + + return 0; +} + +static int ioctl_queue_iso(struct client *client, void __user *arg) +{ + struct fw_cdev_queue_iso request; + struct fw_cdev_iso_packet __user *p, *end, *next; + void *payload, *payload_end; + unsigned long index; + int count; + struct { + struct fw_iso_packet packet; + u8 header[256]; + } u; + + if (client->iso_context == NULL) + return -EINVAL; + if (copy_from_user(&request, arg, sizeof request)) + return -EFAULT; + + /* If the user passes a non-NULL data pointer, has mmap()'ed + * the iso buffer, and the pointer points inside the buffer, + * we setup the payload pointers accordingly. Otherwise we + * set them both to NULL, which will still let packets with + * payload_length == 0 through. 
In other words, if no packets + * use the indirect payload, the iso buffer need not be mapped + * and the request.data pointer is ignored.*/ + + index = (unsigned long)request.data - client->vm_start; + if (request.data != 0 && client->vm_start != 0 && + index <= client->iso_context->buffer_size) { + payload = client->iso_context->buffer + index; + payload_end = client->iso_context->buffer + + client->iso_context->buffer_size; + } else { + payload = NULL; + payload_end = NULL; + } + + if (!access_ok(VERIFY_READ, request.packets, request.size)) + return -EFAULT; + + p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request.packets); + end = (void __user *)p + request.size; + count = 0; + while (p < end) { + if (__copy_from_user(&u.packet, p, sizeof *p)) + return -EFAULT; + next = (struct fw_cdev_iso_packet __user *) + &p->header[u.packet.header_length / 4]; + if (next > end) + return -EINVAL; + if (__copy_from_user + (u.packet.header, p->header, u.packet.header_length)) + return -EFAULT; + if (u.packet.skip && + u.packet.header_length + u.packet.payload_length > 0) + return -EINVAL; + if (payload + u.packet.payload_length > payload_end) + return -EINVAL; + + if (fw_iso_context_queue(client->iso_context, + &u.packet, payload)) + break; + + p = next; + payload += u.packet.payload_length; + count++; + } + + request.size -= uptr_to_u64(p) - request.packets; + request.packets = uptr_to_u64(p); + request.data = + client->vm_start + (payload - client->iso_context->buffer); + + if (copy_to_user(arg, &request, sizeof request)) + return -EFAULT; + + return count; +} + +static int ioctl_send_iso(struct client *client, void __user *arg) +{ + struct fw_cdev_send_iso request; + + if (copy_from_user(&request, arg, sizeof request)) + return -EFAULT; + + return fw_iso_context_send(client->iso_context, request.channel, + request.speed, request.cycle); +} + +static int +dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) +{ + switch (cmd) { + case FW_CDEV_IOC_GET_CONFIG_ROM: + return ioctl_config_rom(client, arg); + case FW_CDEV_IOC_SEND_REQUEST: + return ioctl_send_request(client, arg); + case FW_CDEV_IOC_ALLOCATE: + return ioctl_allocate(client, arg); + case FW_CDEV_IOC_SEND_RESPONSE: + return ioctl_send_response(client, arg); + case FW_CDEV_IOC_CREATE_ISO_CONTEXT: + return ioctl_create_iso_context(client, arg); + case FW_CDEV_IOC_QUEUE_ISO: + return ioctl_queue_iso(client, arg); + case FW_CDEV_IOC_SEND_ISO: + return ioctl_send_iso(client, arg); + default: + return -EINVAL; + } +} + +static long +fw_device_op_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct client *client = file->private_data; + + return dispatch_ioctl(client, cmd, (void __user *) arg); +} + +#ifdef CONFIG_COMPAT +static long +fw_device_op_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct client *client = file->private_data; + + return dispatch_ioctl(client, cmd, compat_ptr(arg)); +} +#endif + +static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct client *client = file->private_data; + + if (client->iso_context->buffer == NULL) + return -EINVAL; + + client->vm_start = vma->vm_start; + + return remap_vmalloc_range(vma, client->iso_context->buffer, 0); +} + +static int fw_device_op_release(struct inode *inode, struct file *file) +{ + struct client *client = file->private_data; + struct address_handler *h, *next; + struct request *r, *next_r; + + if (client->iso_context) + fw_iso_context_destroy(client->iso_context); + + 
list_for_each_entry_safe(h, next, &client->handler_list, link) { + fw_core_remove_address_handler(&h->handler); + kfree(h); + } + + list_for_each_entry_safe(r, next_r, &client->request_list, link) { + fw_send_response(client->device->card, r->request, + RCODE_CONFLICT_ERROR); + kfree(r); + } + + /* TODO: wait for all transactions to finish so + * complete_transaction doesn't try to queue up responses + * after we free client. */ + while (!list_empty(&client->event_list)) + dequeue_event(client, NULL, 0); + + fw_device_put(client->device); + kfree(client); + + return 0; +} + +static unsigned int fw_device_op_poll(struct file *file, poll_table * pt) +{ + struct client *client = file->private_data; + + poll_wait(file, &client->wait, pt); + + if (!list_empty(&client->event_list)) + return POLLIN | POLLRDNORM; + else + return 0; +} + +struct file_operations fw_device_ops = { + .owner = THIS_MODULE, + .open = fw_device_op_open, + .read = fw_device_op_read, + .unlocked_ioctl = fw_device_op_ioctl, + .poll = fw_device_op_poll, + .release = fw_device_op_release, + .mmap = fw_device_op_mmap, + +#ifdef CONFIG_COMPAT + .compat_ioctl = fw_device_op_compat_ioctl +#endif +}; diff --git a/drivers/firewire/fw-device-cdev.h b/drivers/firewire/fw-device-cdev.h new file mode 100644 index 0000000..18b20c2 --- /dev/null +++ b/drivers/firewire/fw-device-cdev.h @@ -0,0 +1,146 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-device-cdev.h -- Char device interface. + * + * Copyright (C) 2005-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __fw_cdev_h +#define __fw_cdev_h + +#include +#include + +#define TCODE_WRITE_QUADLET_REQUEST 0 +#define TCODE_WRITE_BLOCK_REQUEST 1 +#define TCODE_WRITE_RESPONSE 2 +#define TCODE_READ_QUADLET_REQUEST 4 +#define TCODE_READ_BLOCK_REQUEST 5 +#define TCODE_READ_QUADLET_RESPONSE 6 +#define TCODE_READ_BLOCK_RESPONSE 7 +#define TCODE_CYCLE_START 8 +#define TCODE_LOCK_REQUEST 9 +#define TCODE_STREAM_DATA 10 +#define TCODE_LOCK_RESPONSE 11 + +#define RCODE_COMPLETE 0x0 +#define RCODE_CONFLICT_ERROR 0x4 +#define RCODE_DATA_ERROR 0x5 +#define RCODE_TYPE_ERROR 0x6 +#define RCODE_ADDRESS_ERROR 0x7 + +#define SCODE_100 0x0 +#define SCODE_200 0x1 +#define SCODE_400 0x2 +#define SCODE_800 0x3 +#define SCODE_1600 0x4 +#define SCODE_3200 0x5 + +#define FW_CDEV_EVENT_RESPONSE 0x00 +#define FW_CDEV_EVENT_REQUEST 0x01 +#define FW_CDEV_EVENT_ISO_INTERRUPT 0x02 + +/* The 'closure' fields are for user space to use. Data passed in the + * 'closure' field for a request will be returned in the corresponding + * event. It's a 64-bit type so that it's a fixed size type big + * enough to hold a pointer on all platforms. 
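+ * A typical use (just an illustration, not part of the interface) is
+ * to store a pointer to a per-request context object in user space:
+ *
+ *	request.closure = (__u64)(uintptr_t)ctx;
+ *
+ * and cast it back when the matching event arrives.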
*/ + +struct fw_cdev_event_response { + __u32 type; + __u32 rcode; + __u64 closure; + __u32 length; + __u32 data[0]; +}; + +struct fw_cdev_event_request { + __u32 type; + __u32 tcode; + __u64 offset; + __u64 closure; + __u32 serial; + __u32 length; + __u32 data[0]; +}; + +struct fw_cdev_event_iso_interrupt { + __u32 type; + __u32 cycle; + __u64 closure; +}; + +#define FW_CDEV_IOC_GET_CONFIG_ROM _IOR('#', 0x00, struct fw_cdev_get_config_rom) +#define FW_CDEV_IOC_SEND_REQUEST _IO('#', 0x01) +#define FW_CDEV_IOC_ALLOCATE _IO('#', 0x02) +#define FW_CDEV_IOC_SEND_RESPONSE _IO('#', 0x03) +#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IO('#', 0x04) +#define FW_CDEV_IOC_QUEUE_ISO _IO('#', 0x05) +#define FW_CDEV_IOC_SEND_ISO _IO('#', 0x06) + +struct fw_cdev_get_config_rom { + __u32 length; + __u32 data[256]; +}; + +struct fw_cdev_send_request { + __u32 tcode; + __u32 length; + __u64 offset; + __u64 closure; + __u64 data; +}; + +struct fw_cdev_send_response { + __u32 rcode; + __u32 length; + __u64 data; + __u32 serial; +}; + +struct fw_cdev_allocate { + __u64 offset; + __u64 closure; + __u32 length; +}; + +struct fw_cdev_create_iso_context { + __u32 buffer_size; +}; + +struct fw_cdev_iso_packet { + __u16 payload_length; /* Length of indirect payload. */ + __u32 interrupt : 1; /* Generate interrupt on this packet */ + __u32 skip : 1; /* Set to not send packet at all. */ + __u32 tag : 2; + __u32 sy : 4; + __u32 header_length : 8; /* Length of immediate header. */ + __u32 header[0]; +}; + +struct fw_cdev_queue_iso { + __u32 size; + __u64 packets; + __u64 data; +}; + +struct fw_cdev_send_iso { + __u32 channel; + __u32 speed; + __s32 cycle; +}; + +#endif diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c new file mode 100644 index 0000000..ec1cb7f --- /dev/null +++ b/drivers/firewire/fw-device.c @@ -0,0 +1,613 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-device.c - Device probing and sysfs code. + * + * Copyright (C) 2005-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "fw-transaction.h" +#include "fw-topology.h" +#include "fw-device.h" + +void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) +{ + ci->p = p + 1; + ci->end = ci->p + (p[0] >> 16); +} + +EXPORT_SYMBOL(fw_csr_iterator_init); + +int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value) +{ + *key = *ci->p >> 24; + *value = *ci->p & 0xffffff; + + return ci->p++ < ci->end; +} + +EXPORT_SYMBOL(fw_csr_iterator_next); + +static int is_fw_unit(struct device *dev); + +static int match_unit_directory(u32 * directory, struct fw_device_id *id) +{ + struct fw_csr_iterator ci; + int key, value, match; + + match = 0; + fw_csr_iterator_init(&ci, directory); + while (fw_csr_iterator_next(&ci, &key, &value)) { + if (key == CSR_VENDOR && value == id->vendor) + match |= FW_MATCH_VENDOR; + if (key == CSR_MODEL && value == id->model) + match |= FW_MATCH_MODEL; + if (key == CSR_SPECIFIER_ID && value == id->specifier_id) + match |= FW_MATCH_SPECIFIER_ID; + if (key == CSR_VERSION && value == id->version) + match |= FW_MATCH_VERSION; + } + + return (match & id->match_flags) == id->match_flags; +} + +static int fw_unit_match(struct device *dev, struct device_driver *drv) +{ + struct fw_unit *unit = fw_unit(dev); + struct fw_driver *driver = fw_driver(drv); + int i; + + /* We only allow binding to fw_units. */ + if (!is_fw_unit(dev)) + return 0; + + for (i = 0; driver->id_table[i].match_flags != 0; i++) { + if (match_unit_directory(unit->directory, &driver->id_table[i])) + return 1; + } + + return 0; +} + +static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) +{ + struct fw_device *device = fw_device(unit->device.parent); + struct fw_csr_iterator ci; + + int key, value; + int vendor = 0; + int model = 0; + int specifier_id = 0; + int version = 0; + + fw_csr_iterator_init(&ci, &device->config_rom[5]); + while (fw_csr_iterator_next(&ci, &key, &value)) { + switch (key) { + case CSR_VENDOR: + vendor = value; + break; + case CSR_MODEL: + model = value; + break; + } + } + + fw_csr_iterator_init(&ci, unit->directory); + while (fw_csr_iterator_next(&ci, &key, &value)) { + switch (key) { + case CSR_SPECIFIER_ID: + specifier_id = value; + break; + case CSR_VERSION: + version = value; + break; + } + } + + return snprintf(buffer, buffer_size, + "ieee1394:ven%08Xmo%08Xsp%08Xver%08X", + vendor, model, specifier_id, version); +} + +static int +fw_unit_uevent(struct device *dev, char **envp, int num_envp, + char *buffer, int buffer_size) +{ + struct fw_unit *unit = fw_unit(dev); + char modalias[64]; + int length = 0; + int i = 0; + + if (!is_fw_unit(dev)) + goto out; + + get_modalias(unit, modalias, sizeof modalias); + + if (add_uevent_var(envp, num_envp, &i, + buffer, buffer_size, &length, + "MODALIAS=%s", modalias)) + return -ENOMEM; + + out: + envp[i] = NULL; + + return 0; +} + +struct bus_type fw_bus_type = { + .name = "fw", + .match = fw_unit_match, + .uevent = fw_unit_uevent +}; + +EXPORT_SYMBOL(fw_bus_type); + +extern struct fw_device *fw_device_get(struct fw_device *device) +{ + get_device(&device->device); + + return device; +} + +extern void fw_device_put(struct fw_device *device) +{ + put_device(&device->device); +} + +static void fw_device_release(struct device *dev) +{ + struct fw_device *device = fw_device(dev); + unsigned long flags; + + /* Take the card lock so we don't set this to NULL while a + * FW_NODE_UPDATED callback is being handled. 
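+	 * That callback runs under card->lock too and dereferences
+	 * node->data to refresh node_id and generation, so it must
+	 * either see NULL or a device that has not been freed yet.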
*/ + spin_lock_irqsave(&device->card->lock, flags); + device->node->data = NULL; + spin_unlock_irqrestore(&device->card->lock, flags); + + fw_node_put(device->node); + fw_card_put(device->card); + kfree(device->config_rom); + kfree(device); +} + +int fw_device_enable_phys_dma(struct fw_device *device) +{ + return device->card->driver->enable_phys_dma(device->card, + device->node_id, + device->generation); +} + +EXPORT_SYMBOL(fw_device_enable_phys_dma); + +static ssize_t +show_modalias_attribute(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fw_unit *unit = fw_unit(dev); + int length; + + length = get_modalias(unit, buf, PAGE_SIZE); + strcpy(buf + length, "\n"); + + return length + 1; +} + +static struct device_attribute modalias_attribute = { + .attr = {.name = "modalias",.mode = S_IRUGO}, + .show = show_modalias_attribute +}; + +static ssize_t +show_config_rom_attribute(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fw_device *device = fw_device(dev); + + memcpy(buf, device->config_rom, device->config_rom_length * 4); + + return device->config_rom_length * 4; +} + +static struct device_attribute config_rom_attribute = { + .attr = {.name = "config_rom",.mode = S_IRUGO}, + .show = show_config_rom_attribute, +}; + +struct read_quadlet_callback_data { + struct completion done; + int rcode; + u32 data; +}; + +static void +complete_transaction(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) +{ + struct read_quadlet_callback_data *callback_data = data; + + if (rcode == RCODE_COMPLETE) + callback_data->data = be32_to_cpu(*(__be32 *)payload); + callback_data->rcode = rcode; + complete(&callback_data->done); +} + +static int read_rom(struct fw_device *device, int index, u32 * data) +{ + struct read_quadlet_callback_data callback_data; + struct fw_transaction t; + u64 offset; + + init_completion(&callback_data.done); + + offset = 0xfffff0000400ULL + index * 4; + fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST, + device->node_id | LOCAL_BUS, + device->generation, SCODE_100, + offset, NULL, 4, complete_transaction, &callback_data); + + wait_for_completion(&callback_data.done); + + *data = callback_data.data; + + return callback_data.rcode; +} + +static int read_bus_info_block(struct fw_device *device) +{ + static u32 rom[256]; + u32 stack[16], sp, key; + int i, end, length; + + /* First read the bus info block. */ + for (i = 0; i < 5; i++) { + if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE) + return -1; + /* As per IEEE1212 7.2, during power-up, devices can + * reply with a 0 for the first quadlet of the config + * rom to indicate that they are booting (for example, + * if the firmware is on the disk of a external + * harddisk). In that case we just fail, and the + * retry mechanism will try again later. */ + if (i == 0 && rom[i] == 0) + return -1; + } + + /* Now parse the config rom. The config rom is a recursive + * directory structure so we parse it using a stack of + * references to the blocks that make up the structure. We + * push a reference to the root directory on the stack to + * start things off. */ + length = i; + sp = 0; + stack[sp++] = 0xc0000005; + while (sp > 0) { + /* Pop the next block reference of the stack. The + * lower 24 bits is the offset into the config rom, + * the upper 8 bits are the type of the reference the + * block. 
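+		 * The root directory reference pushed above, 0xc0000005,
+		 * is an example: offset 5 into the rom, with the directory
+		 * key type in the top bits.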
*/ + key = stack[--sp]; + i = key & 0xffffff; + if (i >= ARRAY_SIZE(rom)) + /* The reference points outside the standard + * config rom area, something's fishy. */ + return -1; + + /* Read header quadlet for the block to get the length. */ + if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE) + return -1; + end = i + (rom[i] >> 16) + 1; + i++; + if (end > ARRAY_SIZE(rom)) + /* This block extends outside standard config + * area (and the array we're reading it + * into). That's broken, so ignore this + * device. */ + return -1; + + /* Now read in the block. If this is a directory + * block, check the entries as we read them to see if + * it references another block, and push it in that case. */ + while (i < end) { + if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE) + return -1; + if ((key >> 30) == 3 && (rom[i] >> 30) > 1 && + sp < ARRAY_SIZE(stack)) + stack[sp++] = i + rom[i]; + i++; + } + if (length < i) + length = i; + } + + device->config_rom = kmalloc(length * 4, GFP_KERNEL); + if (device->config_rom == NULL) + return -1; + memcpy(device->config_rom, rom, length * 4); + device->config_rom_length = length; + + return 0; +} + +static void fw_unit_release(struct device *dev) +{ + struct fw_unit *unit = fw_unit(dev); + + kfree(unit); +} + +static int is_fw_unit(struct device *dev) +{ + return dev->release == fw_unit_release; +} + +static void create_units(struct fw_device *device) +{ + struct fw_csr_iterator ci; + struct fw_unit *unit; + int key, value, i; + + i = 0; + fw_csr_iterator_init(&ci, &device->config_rom[5]); + while (fw_csr_iterator_next(&ci, &key, &value)) { + if (key != (CSR_UNIT | CSR_DIRECTORY)) + continue; + + /* Get the address of the unit directory and try to + * match the drivers id_tables against it. */ + unit = kzalloc(sizeof *unit, GFP_KERNEL); + if (unit == NULL) { + fw_error("failed to allocate memory for unit\n"); + continue; + } + + unit->directory = ci.p + value - 1; + unit->device.bus = &fw_bus_type; + unit->device.release = fw_unit_release; + unit->device.parent = &device->device; + snprintf(unit->device.bus_id, sizeof unit->device.bus_id, + "%s.%d", device->device.bus_id, i++); + + if (device_register(&unit->device) < 0) { + kfree(unit); + continue; + } + + if (device_create_file(&unit->device, &modalias_attribute) < 0) { + device_unregister(&unit->device); + kfree(unit); + } + } +} + +static int shutdown_unit(struct device *device, void *data) +{ + struct fw_unit *unit = fw_unit(device); + + if (is_fw_unit(device)) { + device_remove_file(&unit->device, &modalias_attribute); + device_unregister(&unit->device); + } + + return 0; +} + +static void fw_device_shutdown(struct work_struct *work) +{ + struct fw_device *device = + container_of(work, struct fw_device, work.work); + + device_remove_file(&device->device, &config_rom_attribute); + cdev_del(&device->cdev); + unregister_chrdev_region(device->device.devt, 1); + device_for_each_child(&device->device, NULL, shutdown_unit); + device_unregister(&device->device); +} + +/* These defines control the retry behavior for reading the config + * rom. It shouldn't be necessary to tweak these; if the device + * doesn't respond to a config rom read within 10 seconds, it's not + * going to respond at all. As for the initial delay, a lot of + * devices will be able to respond within half a second after bus + * reset. On the other hand, it's not really worth being more + * aggressive than that, since it scales pretty well; if 10 devices + * are plugged in, they're all getting read within one second. 
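+ * With the values below that means a first attempt half a second after
+ * bus reset and up to five retries two seconds apart, about ten and a
+ * half seconds in total.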
*/ + +#define MAX_RETRIES 5 +#define RETRY_DELAY (2 * HZ) +#define INITIAL_DELAY (HZ / 2) + +static void fw_device_init(struct work_struct *work) +{ + static int serial; + struct fw_device *device = + container_of(work, struct fw_device, work.work); + + /* All failure paths here set node->data to NULL, so that we + * don't try to do device_for_each_child() on a kfree()'d + * device. */ + + if (read_bus_info_block(device) < 0) { + if (device->config_rom_retries < MAX_RETRIES) { + device->config_rom_retries++; + schedule_delayed_work(&device->work, RETRY_DELAY); + } else { + fw_notify("giving up on config rom for node id %d\n", + device->node_id); + fw_device_release(&device->device); + } + return; + } + + device->device.bus = &fw_bus_type; + device->device.release = fw_device_release; + device->device.parent = device->card->device; + snprintf(device->device.bus_id, sizeof device->device.bus_id, + "fw%d", serial++); + + if (alloc_chrdev_region(&device->device.devt, 0, 1, "fw")) { + fw_error("Failed to register char device region.\n"); + goto error; + } + + cdev_init(&device->cdev, &fw_device_ops); + device->cdev.owner = THIS_MODULE; + kobject_set_name(&device->cdev.kobj, device->device.bus_id); + if (cdev_add(&device->cdev, device->device.devt, 1)) { + fw_error("Failed to register char device.\n"); + goto error; + } + + if (device_add(&device->device)) { + fw_error("Failed to add device.\n"); + goto error; + } + + if (device_create_file(&device->device, &config_rom_attribute) < 0) { + fw_error("Failed to create config rom file.\n"); + goto error_with_device; + } + + create_units(device); + + /* Transition the device to running state. If it got pulled + * out from under us while we did the intialization work, we + * have to shut down the device again here. Normally, though, + * fw_node_event will be responsible for shutting it down when + * necessary. We have to use the atomic cmpxchg here to avoid + * racing with the FW_NODE_DESTROYED case in + * fw_node_event(). */ + if (cmpxchg(&device->state, + FW_DEVICE_INITIALIZING, + FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) + fw_device_shutdown(&device->work.work); + else + fw_notify("created new fw device %s (%d config rom retries)\n", + device->device.bus_id, device->config_rom_retries); + + /* Reschedule the IRM work if we just finished reading the + * root node config rom. If this races with a bus reset we + * just end up running the IRM work a couple of extra times - + * pretty harmless. */ + if (device->node == device->card->root_node) + schedule_delayed_work(&device->card->work, 0); + + return; + + error_with_device: + device_del(&device->device); + error: + cdev_del(&device->cdev); + unregister_chrdev_region(device->device.devt, 1); + put_device(&device->device); +} + +static int update_unit(struct device *dev, void *data) +{ + struct fw_unit *unit = fw_unit(dev); + struct fw_driver *driver = (struct fw_driver *)dev->driver; + + if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) + driver->update(unit); + + return 0; +} + +void fw_node_event(struct fw_card *card, struct fw_node *node, int event) +{ + struct fw_device *device; + + /* Ignore events for the local node (i.e. the node that + * corresponds to the ieee1394 controller in this linux box). 
*/ + if (node == card->local_node) + return; + + switch (event) { + case FW_NODE_CREATED: + case FW_NODE_LINK_ON: + if (!node->link_on) + break; + + device = kzalloc(sizeof(*device), GFP_ATOMIC); + if (device == NULL) + break; + + /* Do minimal intialization of the device here, the + * rest will happen in fw_device_init(). We need the + * card and node so we can read the config rom and we + * need to do device_initialize() now so + * device_for_each_child() in FW_NODE_UPDATED is + * doesn't freak out. */ + device_initialize(&device->device); + device->state = FW_DEVICE_INITIALIZING; + device->card = fw_card_get(card); + device->node = fw_node_get(node); + device->node_id = node->node_id; + device->generation = card->generation; + + /* Set the node data to point back to this device so + * FW_NODE_UPDATED callbacks can update the node_id + * and generation for the device. */ + node->data = device; + + /* Many devices are slow to respond after bus resets, + * especially if they are bus powered and go through + * power-up after getting plugged in. We schedule the + * first config rom scan half a second after bus reset. */ + INIT_DELAYED_WORK(&device->work, fw_device_init); + schedule_delayed_work(&device->work, INITIAL_DELAY); + break; + + case FW_NODE_UPDATED: + if (!node->link_on || node->data == NULL) + break; + + device = node->data; + device->node_id = node->node_id; + device->generation = card->generation; + device_for_each_child(&device->device, NULL, update_unit); + break; + + case FW_NODE_DESTROYED: + case FW_NODE_LINK_OFF: + if (!node->data) + break; + + /* Destroy the device associated with the node. There + * are two cases here: either the device is fully + * initialized (FW_DEVICE_RUNNING) or we're in the + * process of reading its config rom + * (FW_DEVICE_INITIALIZING). If it is fully + * initialized we can reuse device->work to schedule a + * full fw_device_shutdown(). If not, there's work + * scheduled to read it's config rom, and we just put + * the device in shutdown state to have that code fail + * to create the device. */ + device = node->data; + if (xchg(&device->state, + FW_DEVICE_SHUTDOWN) == FW_DEVICE_RUNNING) { + INIT_DELAYED_WORK(&device->work, fw_device_shutdown); + schedule_delayed_work(&device->work, 0); + } + break; + } +} diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h new file mode 100644 index 0000000..84cd5e7 --- /dev/null +++ b/drivers/firewire/fw-device.h @@ -0,0 +1,127 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-device.h - Device probing and sysfs code. + * + * Copyright (C) 2005-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __fw_device_h +#define __fw_device_h + +#include +#include + +enum fw_device_state { + FW_DEVICE_INITIALIZING, + FW_DEVICE_RUNNING, + FW_DEVICE_SHUTDOWN +}; + +struct fw_device { + int state; + struct fw_node *node; + int node_id; + int generation; + struct fw_card *card; + struct device device; + struct cdev cdev; + __be32 *config_rom; + size_t config_rom_length; + int config_rom_retries; + struct delayed_work work; +}; + +static inline struct fw_device * +fw_device(struct device *dev) +{ + return container_of(dev, struct fw_device, device); +} + +struct fw_device *fw_device_get(struct fw_device *device); +void fw_device_put(struct fw_device *device); +int fw_device_enable_phys_dma(struct fw_device *device); + +struct fw_unit { + struct device device; + u32 *directory; +}; + +static inline struct fw_unit * +fw_unit(struct device *dev) +{ + return container_of(dev, struct fw_unit, device); +} + +#define CSR_OFFSET 0x40 +#define CSR_LEAF 0x80 +#define CSR_DIRECTORY 0xc0 + +#define CSR_DESCRIPTOR 0x01 +#define CSR_VENDOR 0x03 +#define CSR_HARDWARE_VERSION 0x04 +#define CSR_NODE_CAPABILITIES 0x0c +#define CSR_UNIT 0x11 +#define CSR_SPECIFIER_ID 0x12 +#define CSR_VERSION 0x13 +#define CSR_DEPENDENT_INFO 0x14 +#define CSR_MODEL 0x17 +#define CSR_INSTANCE 0x18 + +#define SBP2_COMMAND_SET_SPECIFIER 0x38 +#define SBP2_COMMAND_SET 0x39 +#define SBP2_COMMAND_SET_REVISION 0x3b +#define SBP2_FIRMWARE_REVISION 0x3c + +struct fw_csr_iterator { + u32 *p; + u32 *end; +}; + +void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p); +int fw_csr_iterator_next(struct fw_csr_iterator *ci, + int *key, int *value); + +#define FW_MATCH_VENDOR 0x0001 +#define FW_MATCH_MODEL 0x0002 +#define FW_MATCH_SPECIFIER_ID 0x0004 +#define FW_MATCH_VERSION 0x0008 + +struct fw_device_id { + u32 match_flags; + u32 vendor; + u32 model; + u32 specifier_id; + u32 version; + void *driver_data; +}; + +struct fw_driver { + struct device_driver driver; + /* Called when the parent device sits through a bus reset. */ + void (*update) (struct fw_unit *unit); + struct fw_device_id *id_table; +}; + +static inline struct fw_driver * +fw_driver(struct device_driver *drv) +{ + return container_of(drv, struct fw_driver, driver); +} + +extern struct file_operations fw_device_ops; + +#endif diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c index 61548c4..6b63856 100644 --- a/drivers/firewire/fw-iso.c +++ b/drivers/firewire/fw-iso.c @@ -26,6 +26,7 @@ #include "fw-transaction.h" #include "fw-topology.h" +#include "fw-device.h" static int setup_iso_buffer(struct fw_iso_context *ctx, size_t size, diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c index 2778aa3..e475025 100644 --- a/drivers/firewire/fw-topology.c +++ b/drivers/firewire/fw-topology.c @@ -434,13 +434,15 @@ fw_core_handle_bus_reset(struct fw_card *card, for_each_fw_node(card, local_node, report_found_node); } else { update_tree(card, local_node, &changed); + if (changed) + card->irm_retries = 0; } + /* If we're not the root node, we may have to do some IRM work. 
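+	 * That work is driven through the card's delayed work item
+	 * (card->work) scheduled below.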
*/ + if (card->local_node != card->root_node) + schedule_delayed_work(&card->work, 0); + spin_unlock_irqrestore(&card->lock, flags); } EXPORT_SYMBOL(fw_core_handle_bus_reset); - -void fw_node_event(struct fw_card *card, struct fw_node *node, int event) -{ -} diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index c3acf74..affd420 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c @@ -33,6 +33,7 @@ #include "fw-transaction.h" #include "fw-topology.h" +#include "fw-device.h" #define header_pri(pri) ((pri) << 0) #define header_tcode(tcode) ((tcode) << 4) @@ -702,10 +703,6 @@ static struct fw_descriptor vendor_textual_descriptor = { .data = vendor_textual_descriptor_data }; -struct bus_type fw_bus_type = { - .name = "fw", -}; - static int __init fw_core_init(void) { int retval; diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index 149ef16..7f618f2 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -265,6 +265,10 @@ struct fw_card { struct device card_device; struct list_head link; + + /* Work struct for IRM duties. */ + struct delayed_work work; + int irm_retries; }; struct fw_card *fw_card_get(struct fw_card *card); -- cgit v0.10.2 From ed5689122f4cdb5cb8c6770ad1a2c8561b32d9b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 19 Dec 2006 19:58:35 -0500 Subject: firewire: Add driver for OHCI firewire host controllers. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Kristian Høgsberg Signed-off-by: Stefan Richter diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index bdd6303..b386334 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig @@ -20,4 +20,15 @@ config FW To compile this driver as a module, say M here: the module will be called fw-core. +config FW_OHCI + tristate "Support for OHCI firewire host controllers" + depends on PCI && FW + help + Enable this driver if you have an firewire controller based + on the OHCI specification. For all practical purposes, this + is the only chipset in use, so say Y here. + + To compile this driver as a module, say M here: the + module will be called fw-ohci. + endmenu diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile index da77bc0..add3b98 100644 --- a/drivers/firewire/Makefile +++ b/drivers/firewire/Makefile @@ -6,3 +6,4 @@ fw-core-objs := fw-card.o fw-topology.o fw-transaction.o fw-iso.o \ fw-device.o fw-device-cdev.o obj-$(CONFIG_FW) += fw-core.o +obj-$(CONFIG_FW_OHCI) += fw-ohci.o diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c new file mode 100644 index 0000000..5392a2b --- /dev/null +++ b/drivers/firewire/fw-ohci.c @@ -0,0 +1,1394 @@ +/* -*- c-basic-offset: 8 -*- + * + * fw-ohci.c - Driver for OHCI 1394 boards + * Copyright (C) 2003-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fw-transaction.h" +#include "fw-ohci.h" + +#define descriptor_output_more 0 +#define descriptor_output_last (1 << 12) +#define descriptor_input_more (2 << 12) +#define descriptor_input_last (3 << 12) +#define descriptor_status (1 << 11) +#define descriptor_key_immediate (2 << 8) +#define descriptor_ping (1 << 7) +#define descriptor_yy (1 << 6) +#define descriptor_no_irq (0 << 4) +#define descriptor_irq_error (1 << 4) +#define descriptor_irq_always (3 << 4) +#define descriptor_branch_always (3 << 2) + +struct descriptor { + __le16 req_count; + __le16 control; + __le32 data_address; + __le32 branch_address; + __le16 res_count; + __le16 transfer_status; +} __attribute__((aligned(16))); + +struct ar_context { + struct fw_ohci *ohci; + struct descriptor descriptor; + __le32 buffer[512]; + dma_addr_t descriptor_bus; + dma_addr_t buffer_bus; + + u32 command_ptr; + u32 control_set; + u32 control_clear; + + struct tasklet_struct tasklet; +}; + +struct at_context { + struct fw_ohci *ohci; + dma_addr_t descriptor_bus; + dma_addr_t buffer_bus; + + struct list_head list; + + struct { + struct descriptor more; + __le32 header[4]; + struct descriptor last; + } d; + + u32 command_ptr; + u32 control_set; + u32 control_clear; + + struct tasklet_struct tasklet; +}; + +#define it_header_sy(v) ((v) << 0) +#define it_header_tcode(v) ((v) << 4) +#define it_header_channel(v) ((v) << 8) +#define it_header_tag(v) ((v) << 14) +#define it_header_speed(v) ((v) << 16) +#define it_header_data_length(v) ((v) << 16) + +struct iso_context { + struct fw_iso_context base; + struct tasklet_struct tasklet; + u32 control_set; + u32 control_clear; + u32 command_ptr; + u32 context_match; + + struct descriptor *buffer; + dma_addr_t buffer_bus; + struct descriptor *head_descriptor; + struct descriptor *tail_descriptor; + struct descriptor *tail_descriptor_last; + struct descriptor *prev_descriptor; +}; + +#define CONFIG_ROM_SIZE 1024 + +struct fw_ohci { + struct fw_card card; + + __iomem char *registers; + dma_addr_t self_id_bus; + __le32 *self_id_cpu; + struct tasklet_struct bus_reset_tasklet; + int generation; + int request_generation; + + /* Spinlock for accessing fw_ohci data. Never call out of + * this driver with this lock held. 
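+	 * In particular, completed packets are collected on a local list
+	 * and their callbacks only run from do_packet_callbacks() after
+	 * the lock has been dropped.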
*/ + spinlock_t lock; + u32 self_id_buffer[512]; + + /* Config rom buffers */ + __be32 *config_rom; + dma_addr_t config_rom_bus; + __be32 *next_config_rom; + dma_addr_t next_config_rom_bus; + u32 next_header; + + struct ar_context ar_request_ctx; + struct ar_context ar_response_ctx; + struct at_context at_request_ctx; + struct at_context at_response_ctx; + + u32 it_context_mask; + struct iso_context *it_context_list; + u32 ir_context_mask; + struct iso_context *ir_context_list; +}; + +extern inline struct fw_ohci *fw_ohci(struct fw_card *card) +{ + return container_of(card, struct fw_ohci, card); +} + +#define CONTEXT_CYCLE_MATCH_ENABLE 0x80000000 + +#define CONTEXT_RUN 0x8000 +#define CONTEXT_WAKE 0x1000 +#define CONTEXT_DEAD 0x0800 +#define CONTEXT_ACTIVE 0x0400 + +#define OHCI1394_MAX_AT_REQ_RETRIES 0x2 +#define OHCI1394_MAX_AT_RESP_RETRIES 0x2 +#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 + +#define FW_OHCI_MAJOR 240 +#define OHCI1394_REGISTER_SIZE 0x800 +#define OHCI_LOOP_COUNT 500 +#define OHCI1394_PCI_HCI_Control 0x40 +#define SELF_ID_BUF_SIZE 0x800 + +/* FIXME: Move this to linux/pci_ids.h */ +#define PCI_CLASS_SERIAL_FIREWIRE_OHCI 0x0c0010 + +static char ohci_driver_name[] = KBUILD_MODNAME; + +extern inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) +{ + writel(data, ohci->registers + offset); +} + +extern inline u32 reg_read(const struct fw_ohci *ohci, int offset) +{ + return readl(ohci->registers + offset); +} + +extern inline void flush_writes(const struct fw_ohci *ohci) +{ + /* Do a dummy read to flush writes. */ + reg_read(ohci, OHCI1394_Version); +} + +static int +ohci_update_phy_reg(struct fw_card *card, int addr, + int clear_bits, int set_bits) +{ + struct fw_ohci *ohci = fw_ohci(card); + u32 val, old; + + reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); + msleep(2); + val = reg_read(ohci, OHCI1394_PhyControl); + if ((val & OHCI1394_PhyControl_ReadDone) == 0) { + fw_error("failed to set phy reg bits.\n"); + return -EBUSY; + } + + old = OHCI1394_PhyControl_ReadData(val); + old = (old & ~clear_bits) | set_bits; + reg_write(ohci, OHCI1394_PhyControl, + OHCI1394_PhyControl_Write(addr, old)); + + return 0; +} + +static void ar_context_run(struct ar_context *ctx) +{ + reg_write(ctx->ohci, ctx->command_ptr, ctx->descriptor_bus | 1); + reg_write(ctx->ohci, ctx->control_set, CONTEXT_RUN); + flush_writes(ctx->ohci); +} + +static void ar_context_tasklet(unsigned long data) +{ + struct ar_context *ctx = (struct ar_context *)data; + struct fw_ohci *ohci = ctx->ohci; + u32 status; + int length, speed, ack, timestamp, tcode; + + /* FIXME: What to do about evt_* errors? */ + length = le16_to_cpu(ctx->descriptor.req_count) - + le16_to_cpu(ctx->descriptor.res_count) - 4; + status = le32_to_cpu(ctx->buffer[length / 4]); + ack = ((status >> 16) & 0x1f) - 16; + speed = (status >> 21) & 0x7; + timestamp = status & 0xffff; + + ctx->buffer[0] = le32_to_cpu(ctx->buffer[0]); + ctx->buffer[1] = le32_to_cpu(ctx->buffer[1]); + ctx->buffer[2] = le32_to_cpu(ctx->buffer[2]); + + tcode = (ctx->buffer[0] >> 4) & 0x0f; + if (TCODE_IS_BLOCK_PACKET(tcode)) + ctx->buffer[3] = le32_to_cpu(ctx->buffer[3]); + + /* The OHCI bus reset handler synthesizes a phy packet with + * the new generation number when a bus reset happens (see + * section 8.4.2.3). This helps us determine when a request + * was received and make sure we send the response in the same + * generation. 
We only need this for requests; for responses + * we use the unique tlabel for finding the matching + * request. */ + + if (ack + 16 == 0x09) + ohci->request_generation = (ctx->buffer[2] >> 16) & 0xff; + else if (ctx == &ohci->ar_request_ctx) + fw_core_handle_request(&ohci->card, speed, ack, timestamp, + ohci->request_generation, + length, ctx->buffer); + else + fw_core_handle_response(&ohci->card, speed, ack, timestamp, + length, ctx->buffer); + + ctx->descriptor.data_address = cpu_to_le32(ctx->buffer_bus); + ctx->descriptor.req_count = cpu_to_le16(sizeof ctx->buffer); + ctx->descriptor.res_count = cpu_to_le16(sizeof ctx->buffer); + + dma_sync_single_for_device(ohci->card.device, ctx->descriptor_bus, + sizeof ctx->descriptor_bus, DMA_TO_DEVICE); + + /* FIXME: We stop and restart the ar context here, what if we + * stop while a receive is in progress? Maybe we could just + * loop the context back to itself and use it in buffer fill + * mode as intended... */ + + reg_write(ctx->ohci, ctx->control_clear, CONTEXT_RUN); + ar_context_run(ctx); +} + +static int +ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 control_set) +{ + ctx->descriptor_bus = + dma_map_single(ohci->card.device, &ctx->descriptor, + sizeof ctx->descriptor, DMA_TO_DEVICE); + if (ctx->descriptor_bus == 0) + return -ENOMEM; + + if (ctx->descriptor_bus & 0xf) + fw_notify("descriptor not 16-byte aligned: 0x%08x\n", + ctx->descriptor_bus); + + ctx->buffer_bus = + dma_map_single(ohci->card.device, ctx->buffer, + sizeof ctx->buffer, DMA_FROM_DEVICE); + + if (ctx->buffer_bus == 0) { + dma_unmap_single(ohci->card.device, ctx->descriptor_bus, + sizeof ctx->descriptor, DMA_TO_DEVICE); + return -ENOMEM; + } + + memset(&ctx->descriptor, 0, sizeof ctx->descriptor); + ctx->descriptor.control = cpu_to_le16(descriptor_input_more | + descriptor_status | + descriptor_branch_always); + ctx->descriptor.req_count = cpu_to_le16(sizeof ctx->buffer); + ctx->descriptor.data_address = cpu_to_le32(ctx->buffer_bus); + ctx->descriptor.res_count = cpu_to_le16(sizeof ctx->buffer); + + ctx->control_set = control_set; + ctx->control_clear = control_set + 4; + ctx->command_ptr = control_set + 12; + ctx->ohci = ohci; + + tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); + + ar_context_run(ctx); + + return 0; +} + +static void +do_packet_callbacks(struct fw_ohci *ohci, struct list_head *list) +{ + struct fw_packet *p, *next; + + list_for_each_entry_safe(p, next, list, link) + p->callback(p, &ohci->card, p->status); +} + +static void +complete_transmission(struct fw_packet *packet, + int status, struct list_head *list) +{ + list_move_tail(&packet->link, list); + packet->status = status; +} + +/* This function prepares the first packet in the context queue for + * transmission. Must always be called with the ochi->lock held to + * ensure proper generation handling and locking around packet queue + * manipulation. 
*/ +static void +at_context_setup_packet(struct at_context *ctx, struct list_head *list) +{ + struct fw_packet *packet; + struct fw_ohci *ohci = ctx->ohci; + int z, tcode; + + packet = fw_packet(ctx->list.next); + + memset(&ctx->d, 0, sizeof ctx->d); + if (packet->payload_length > 0) { + packet->payload_bus = dma_map_single(ohci->card.device, + packet->payload, + packet->payload_length, + DMA_TO_DEVICE); + if (packet->payload_bus == 0) { + complete_transmission(packet, -ENOMEM, list); + return; + } + + ctx->d.more.control = + cpu_to_le16(descriptor_output_more | + descriptor_key_immediate); + ctx->d.more.req_count = cpu_to_le16(packet->header_length); + ctx->d.more.res_count = cpu_to_le16(packet->timestamp); + ctx->d.last.control = + cpu_to_le16(descriptor_output_last | + descriptor_irq_always | + descriptor_branch_always); + ctx->d.last.req_count = cpu_to_le16(packet->payload_length); + ctx->d.last.data_address = cpu_to_le32(packet->payload_bus); + z = 3; + } else { + ctx->d.more.control = + cpu_to_le16(descriptor_output_last | + descriptor_key_immediate | + descriptor_irq_always | + descriptor_branch_always); + ctx->d.more.req_count = cpu_to_le16(packet->header_length); + ctx->d.more.res_count = cpu_to_le16(packet->timestamp); + z = 2; + } + + /* The DMA format for asyncronous link packets is different + * from the IEEE1394 layout, so shift the fields around + * accordingly. If header_length is 8, it's a PHY packet, to + * which we need to prepend an extra quadlet. */ + if (packet->header_length > 8) { + ctx->d.header[0] = cpu_to_le32((packet->header[0] & 0xffff) | + (packet->speed << 16)); + ctx->d.header[1] = cpu_to_le32((packet->header[1] & 0xffff) | + (packet->header[0] & 0xffff0000)); + ctx->d.header[2] = cpu_to_le32(packet->header[2]); + + tcode = (packet->header[0] >> 4) & 0x0f; + if (TCODE_IS_BLOCK_PACKET(tcode)) + ctx->d.header[3] = cpu_to_le32(packet->header[3]); + else + ctx->d.header[3] = packet->header[3]; + } else { + ctx->d.header[0] = + cpu_to_le32((OHCI1394_phy_tcode << 4) | + (packet->speed << 16)); + ctx->d.header[1] = cpu_to_le32(packet->header[0]); + ctx->d.header[2] = cpu_to_le32(packet->header[1]); + ctx->d.more.req_count = cpu_to_le16(12); + } + + /* FIXME: Document how the locking works. */ + if (ohci->generation == packet->generation) { + reg_write(ctx->ohci, ctx->command_ptr, + ctx->descriptor_bus | z); + reg_write(ctx->ohci, ctx->control_set, + CONTEXT_RUN | CONTEXT_WAKE); + } else { + /* We dont return error codes from this function; all + * transmission errors are reported through the + * callback. 
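+		 * Here the packet's generation no longer matches the
+		 * current bus generation, so it is completed with -ESTALE
+		 * and the callback runs once ohci->lock is released.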
*/ + complete_transmission(packet, -ESTALE, list); + } +} + +static void at_context_stop(struct at_context *ctx) +{ + u32 reg; + + reg_write(ctx->ohci, ctx->control_clear, CONTEXT_RUN); + + reg = reg_read(ctx->ohci, ctx->control_set); + if (reg & CONTEXT_ACTIVE) + fw_notify("Tried to stop context, but it is still active " + "(0x%08x).\n", reg); +} + +static void at_context_tasklet(unsigned long data) +{ + struct at_context *ctx = (struct at_context *)data; + struct fw_ohci *ohci = ctx->ohci; + struct fw_packet *packet; + LIST_HEAD(list); + unsigned long flags; + int evt; + + spin_lock_irqsave(&ohci->lock, flags); + + packet = fw_packet(ctx->list.next); + + at_context_stop(ctx); + + if (packet->payload_length > 0) { + dma_unmap_single(ohci->card.device, packet->payload_bus, + packet->payload_length, DMA_TO_DEVICE); + evt = le16_to_cpu(ctx->d.last.transfer_status) & 0x1f; + packet->timestamp = le16_to_cpu(ctx->d.last.res_count); + } + else { + evt = le16_to_cpu(ctx->d.more.transfer_status) & 0x1f; + packet->timestamp = le16_to_cpu(ctx->d.more.res_count); + } + + if (evt < 16) { + switch (evt) { + case OHCI1394_evt_timeout: + /* Async response transmit timed out. */ + complete_transmission(packet, -ETIMEDOUT, &list); + break; + + case OHCI1394_evt_flushed: + /* The packet was flushed should give same + * error as when we try to use a stale + * generation count. */ + complete_transmission(packet, -ESTALE, &list); + break; + + case OHCI1394_evt_missing_ack: + /* This would be a higher level software + * error, it is using a valid (current) + * generation count, but the node is not on + * the bus. */ + complete_transmission(packet, -ENODEV, &list); + break; + + default: + complete_transmission(packet, -EIO, &list); + break; + } + } else + complete_transmission(packet, evt - 16, &list); + + /* If more packets are queued, set up the next one. */ + if (!list_empty(&ctx->list)) + at_context_setup_packet(ctx, &list); + + spin_unlock_irqrestore(&ohci->lock, flags); + + do_packet_callbacks(ohci, &list); +} + +static int +at_context_init(struct at_context *ctx, struct fw_ohci *ohci, u32 control_set) +{ + INIT_LIST_HEAD(&ctx->list); + + ctx->descriptor_bus = + dma_map_single(ohci->card.device, &ctx->d, + sizeof ctx->d, DMA_TO_DEVICE); + if (ctx->descriptor_bus == 0) + return -ENOMEM; + + ctx->control_set = control_set; + ctx->control_clear = control_set + 4; + ctx->command_ptr = control_set + 12; + ctx->ohci = ohci; + + tasklet_init(&ctx->tasklet, at_context_tasklet, (unsigned long)ctx); + + return 0; +} + +static void +at_context_transmit(struct at_context *ctx, struct fw_packet *packet) +{ + LIST_HEAD(list); + unsigned long flags; + int was_empty; + + spin_lock_irqsave(&ctx->ohci->lock, flags); + + was_empty = list_empty(&ctx->list); + list_add_tail(&packet->link, &ctx->list); + if (was_empty) + at_context_setup_packet(ctx, &list); + + spin_unlock_irqrestore(&ctx->ohci->lock, flags); + + do_packet_callbacks(ctx->ohci, &list); +} + +static void bus_reset_tasklet(unsigned long data) +{ + struct fw_ohci *ohci = (struct fw_ohci *)data; + int self_id_count, i, j, reg, node_id; + int generation, new_generation; + unsigned long flags; + + reg = reg_read(ohci, OHCI1394_NodeID); + if (!(reg & OHCI1394_NodeID_idValid)) { + fw_error("node ID not valid, new bus reset in progress\n"); + return; + } + node_id = reg & 0xffff; + + /* The count in the SelfIDCount register is the number of + * bytes in the self ID receive buffer. 
Since we also receive + * the inverted quadlets and a header quadlet, we shift one + * bit extra to get the actual number of self IDs. */ + + self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff; + generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; + + for (i = 1, j = 0; j < self_id_count; i += 2, j++) { + if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) + fw_error("inconsistent self IDs\n"); + ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]); + } + + /* Check the consistency of the self IDs we just read. The + * problem we face is that a new bus reset can start while we + * read out the self IDs from the DMA buffer. If this happens, + * the DMA buffer will be overwritten with new self IDs and we + * will read out inconsistent data. The OHCI specification + * (section 11.2) recommends a technique similar to + * linux/seqlock.h, where we remember the generation of the + * self IDs in the buffer before reading them out and compare + * it to the current generation after reading them out. If + * the two generations match we know we have a consistent set + * of self IDs. */ + + new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; + if (new_generation != generation) { + fw_notify("recursive bus reset detected, " + "discarding self ids\n"); + return; + } + + /* FIXME: Document how the locking works. */ + spin_lock_irqsave(&ohci->lock, flags); + + ohci->generation = generation; + at_context_stop(&ohci->at_request_ctx); + at_context_stop(&ohci->at_response_ctx); + reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); + + /* This next bit is unrelated to the AT context stuff but we + * have to do it under the spinlock also. If a new config rom + * was set up before this reset, the old one is now no longer + * in use and we can free it. Update the config rom pointers + * to point to the current config rom and clear the + * next_config_rom pointer so a new udpate can take place. */ + + if (ohci->next_config_rom != NULL) { + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, + ohci->config_rom, ohci->config_rom_bus); + ohci->config_rom = ohci->next_config_rom; + ohci->config_rom_bus = ohci->next_config_rom_bus; + ohci->next_config_rom = NULL; + + /* Restore config_rom image and manually update + * config_rom registers. Writing the header quadlet + * will indicate that the config rom is ready, so we + * do that last. 
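+		 * Until then the header quadlet is still 0, which reads
+		 * as "config rom not ready yet".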
*/ + reg_write(ohci, OHCI1394_BusOptions, + be32_to_cpu(ohci->config_rom[2])); + ohci->config_rom[0] = cpu_to_be32(ohci->next_header); + reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header); + } + + spin_unlock_irqrestore(&ohci->lock, flags); + + fw_core_handle_bus_reset(&ohci->card, node_id, generation, + self_id_count, ohci->self_id_buffer); +} + +static irqreturn_t irq_handler(int irq, void *data) +{ + struct fw_ohci *ohci = data; + u32 event, iso_event; + int i; + + event = reg_read(ohci, OHCI1394_IntEventClear); + + if (!event) + return IRQ_NONE; + + reg_write(ohci, OHCI1394_IntEventClear, event); + + if (event & OHCI1394_selfIDComplete) + tasklet_schedule(&ohci->bus_reset_tasklet); + + if (event & OHCI1394_RQPkt) + tasklet_schedule(&ohci->ar_request_ctx.tasklet); + + if (event & OHCI1394_RSPkt) + tasklet_schedule(&ohci->ar_response_ctx.tasklet); + + if (event & OHCI1394_reqTxComplete) + tasklet_schedule(&ohci->at_request_ctx.tasklet); + + if (event & OHCI1394_respTxComplete) + tasklet_schedule(&ohci->at_response_ctx.tasklet); + + iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet); + reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); + + while (iso_event) { + i = ffs(iso_event) - 1; + tasklet_schedule(&ohci->ir_context_list[i].tasklet); + iso_event &= ~(1 << i); + } + + iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet); + reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); + + while (iso_event) { + i = ffs(iso_event) - 1; + tasklet_schedule(&ohci->it_context_list[i].tasklet); + iso_event &= ~(1 << i); + } + + return IRQ_HANDLED; +} + +static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length) +{ + struct fw_ohci *ohci = fw_ohci(card); + struct pci_dev *dev = to_pci_dev(card->device); + + /* When the link is not yet enabled, the atomic config rom + * update mechanism described below in ohci_set_config_rom() + * is not active. We have to update ConfigRomHeader and + * BusOptions manually, and the write to ConfigROMmap takes + * effect immediately. We tie this to the enabling of the + * link, so we have a valid config rom before enabling - the + * OHCI requires that ConfigROMhdr and BusOptions have valid + * values before enabling. + * + * However, when the ConfigROMmap is written, some controllers + * always read back quadlets 0 and 2 from the config rom to + * the ConfigRomHeader and BusOptions registers on bus reset. + * They shouldn't do that in this initial case where the link + * isn't enabled. This means we have to use the same + * workaround here, setting the bus header to 0 and then write + * the right values in the bus reset tasklet. 
+ */ + + ohci->next_config_rom = + dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, + &ohci->next_config_rom_bus, GFP_KERNEL); + if (ohci->next_config_rom == NULL) + return -ENOMEM; + + memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE); + fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4); + + ohci->next_header = config_rom[0]; + ohci->next_config_rom[0] = 0; + reg_write(ohci, OHCI1394_ConfigROMhdr, 0); + reg_write(ohci, OHCI1394_BusOptions, config_rom[2]); + reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); + + reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); + + if (request_irq(dev->irq, irq_handler, + SA_SHIRQ, ohci_driver_name, ohci)) { + fw_error("Failed to allocate shared interrupt %d.\n", + dev->irq); + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, + ohci->config_rom, ohci->config_rom_bus); + return -EIO; + } + + reg_write(ohci, OHCI1394_HCControlSet, + OHCI1394_HCControl_linkEnable | + OHCI1394_HCControl_BIBimageValid); + flush_writes(ohci); + + /* We are ready to go, initiate bus reset to finish the + * initialization. */ + + fw_core_initiate_bus_reset(&ohci->card, 1); + + return 0; +} + +static int +ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) +{ + struct fw_ohci *ohci; + unsigned long flags; + int retval = 0; + __be32 *next_config_rom; + dma_addr_t next_config_rom_bus; + + ohci = fw_ohci(card); + + /* When the OHCI controller is enabled, the config rom update + * mechanism is a bit tricky, but easy enough to use. See + * section 5.5.6 in the OHCI specification. + * + * The OHCI controller caches the new config rom address in a + * shadow register (ConfigROMmapNext) and needs a bus reset + * for the changes to take place. When the bus reset is + * detected, the controller loads the new values for the + * ConfigRomHeader and BusOptions registers from the specified + * config rom and loads ConfigROMmap from the ConfigROMmapNext + * shadow register. All automatically and atomically. + * + * Now, there's a twist to this story. The automatic load of + * ConfigRomHeader and BusOptions doesn't honor the + * noByteSwapData bit, so with a be32 config rom, the + * controller will load be32 values in to these registers + * during the atomic update, even on litte endian + * architectures. The workaround we use is to put a 0 in the + * header quadlet; 0 is endian agnostic and means that the + * config rom isn't ready yet. In the bus reset tasklet we + * then set up the real values for the two registers. + * + * We use ohci->lock to avoid racing with the code that sets + * ohci->next_config_rom to NULL (see bus_reset_tasklet). + */ + + next_config_rom = + dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, + &next_config_rom_bus, GFP_KERNEL); + if (next_config_rom == NULL) + return -ENOMEM; + + spin_lock_irqsave(&ohci->lock, flags); + + if (ohci->next_config_rom == NULL) { + ohci->next_config_rom = next_config_rom; + ohci->next_config_rom_bus = next_config_rom_bus; + + memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE); + fw_memcpy_to_be32(ohci->next_config_rom, config_rom, + length * 4); + + ohci->next_header = config_rom[0]; + ohci->next_config_rom[0] = 0; + + reg_write(ohci, OHCI1394_ConfigROMmap, + ohci->next_config_rom_bus); + } else { + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, + next_config_rom, next_config_rom_bus); + retval = -EBUSY; + } + + spin_unlock_irqrestore(&ohci->lock, flags); + + /* Now initiate a bus reset to have the changes take + * effect. 
We clean up the old config rom memory and DMA + * mappings in the bus reset tasklet, since the OHCI + * controller could need to access it before the bus reset + * takes effect. */ + if (retval == 0) + fw_core_initiate_bus_reset(&ohci->card, 1); + + return retval; +} + +static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) +{ + struct fw_ohci *ohci = fw_ohci(card); + + at_context_transmit(&ohci->at_request_ctx, packet); +} + +static void ohci_send_response(struct fw_card *card, struct fw_packet *packet) +{ + struct fw_ohci *ohci = fw_ohci(card); + + at_context_transmit(&ohci->at_response_ctx, packet); +} + +static int +ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) +{ + struct fw_ohci *ohci = fw_ohci(card); + unsigned long flags; + int retval = 0; + + /* FIXME: make sure this bitmask is cleared when we clear the + * busReset interrupt bit. */ + + spin_lock_irqsave(&ohci->lock, flags); + + if (ohci->generation != generation) { + retval = -ESTALE; + goto out; + } + + if (node_id < 32) { + reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << node_id); + } else { + reg_write(ohci, OHCI1394_PhyReqFilterHiSet, + 1 << (node_id - 32)); + } + flush_writes(ohci); + + spin_unlock_irqrestore(&ohci->lock, flags); + + out: + return retval; +} + +static void ir_context_tasklet(unsigned long data) +{ + struct iso_context *ctx = (struct iso_context *)data; + + (void)ctx; +} + +#define ISO_BUFFER_SIZE (64 * 1024) + +static void flush_iso_context(struct iso_context *ctx) +{ + struct fw_ohci *ohci = fw_ohci(ctx->base.card); + struct descriptor *d, *last; + u32 address; + int z; + + dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus, + ISO_BUFFER_SIZE, DMA_TO_DEVICE); + + d = ctx->tail_descriptor; + last = ctx->tail_descriptor_last; + + while (last->branch_address != 0 && last->transfer_status != 0) { + address = le32_to_cpu(last->branch_address); + z = address & 0xf; + d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d; + + if (z == 2) + last = d; + else + last = d + z - 1; + + if (le16_to_cpu(last->control) & descriptor_irq_always) + ctx->base.callback(&ctx->base, + 0, le16_to_cpu(last->res_count), + ctx->base.callback_data); + } + + ctx->tail_descriptor = d; + ctx->tail_descriptor_last = last; +} + +static void it_context_tasklet(unsigned long data) +{ + struct iso_context *ctx = (struct iso_context *)data; + + flush_iso_context(ctx); +} + +static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, + int type) +{ + struct fw_ohci *ohci = fw_ohci(card); + struct iso_context *ctx, *list; + void (*tasklet) (unsigned long data); + u32 *mask; + unsigned long flags; + int index; + + if (type == FW_ISO_CONTEXT_TRANSMIT) { + mask = &ohci->it_context_mask; + list = ohci->it_context_list; + tasklet = it_context_tasklet; + } else { + mask = &ohci->ir_context_mask; + list = ohci->ir_context_list; + tasklet = ir_context_tasklet; + } + + spin_lock_irqsave(&ohci->lock, flags); + index = ffs(*mask) - 1; + if (index >= 0) + *mask &= ~(1 << index); + spin_unlock_irqrestore(&ohci->lock, flags); + + if (index < 0) + return ERR_PTR(-EBUSY); + + ctx = &list[index]; + memset(ctx, 0, sizeof *ctx); + tasklet_init(&ctx->tasklet, tasklet, (unsigned long)ctx); + + ctx->buffer = kmalloc(ISO_BUFFER_SIZE, GFP_KERNEL); + if (ctx->buffer == NULL) { + spin_lock_irqsave(&ohci->lock, flags); + *mask |= 1 << index; + spin_unlock_irqrestore(&ohci->lock, flags); + return ERR_PTR(-ENOMEM); + } + + ctx->buffer_bus = + dma_map_single(card->device, ctx->buffer, + 
ISO_BUFFER_SIZE, DMA_TO_DEVICE); + + ctx->head_descriptor = ctx->buffer; + ctx->prev_descriptor = ctx->buffer; + ctx->tail_descriptor = ctx->buffer; + ctx->tail_descriptor_last = ctx->buffer; + + /* We put a dummy descriptor in the buffer that has a NULL + * branch address and looks like it's been sent. That way we + * have a descriptor to append DMA programs to. Also, the + * ring buffer invariant is that it always has at least one + * element so that head == tail means buffer full. */ + + memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor); + ctx->head_descriptor->control = + cpu_to_le16(descriptor_output_last); + ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011); + ctx->head_descriptor++; + + return &ctx->base; +} + +static int ohci_send_iso(struct fw_iso_context *base, s32 cycle) +{ + struct iso_context *ctx = (struct iso_context *)base; + struct fw_ohci *ohci = fw_ohci(ctx->base.card); + u32 cycle_match = 0; + int index; + + index = ctx - ohci->it_context_list; + if (cycle > 0) + cycle_match = CONTEXT_CYCLE_MATCH_ENABLE | + (cycle & 0x7fff) << 16; + + reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); + reg_write(ohci, OHCI1394_IsoXmitCommandPtr(index), + le32_to_cpu(ctx->tail_descriptor_last->branch_address)); + reg_write(ohci, OHCI1394_IsoXmitContextControlClear(index), ~0); + reg_write(ohci, OHCI1394_IsoXmitContextControlSet(index), + CONTEXT_RUN | cycle_match); + flush_writes(ohci); + + return 0; +} + +static void ohci_free_iso_context(struct fw_iso_context *base) +{ + struct fw_ohci *ohci = fw_ohci(base->card); + struct iso_context *ctx = (struct iso_context *)base; + unsigned long flags; + int index; + + flush_iso_context(ctx); + + spin_lock_irqsave(&ohci->lock, flags); + + if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { + index = ctx - ohci->it_context_list; + reg_write(ohci, OHCI1394_IsoXmitContextControlClear(index), ~0); + reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); + ohci->it_context_mask |= 1 << index; + } else { + index = ctx - ohci->ir_context_list; + reg_write(ohci, OHCI1394_IsoRcvContextControlClear(index), ~0); + reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); + ohci->ir_context_mask |= 1 << index; + } + flush_writes(ohci); + + dma_unmap_single(ohci->card.device, ctx->buffer_bus, + ISO_BUFFER_SIZE, DMA_TO_DEVICE); + + spin_unlock_irqrestore(&ohci->lock, flags); +} + +static int +ohci_queue_iso(struct fw_iso_context *base, + struct fw_iso_packet *packet, void *payload) +{ + struct iso_context *ctx = (struct iso_context *)base; + struct fw_ohci *ohci = fw_ohci(ctx->base.card); + struct descriptor *d, *end, *last, *tail, *pd; + struct fw_iso_packet *p; + __le32 *header; + dma_addr_t d_bus; + u32 z, header_z, payload_z, irq; + u32 payload_index, payload_end_index, next_page_index; + int index, page, end_page, i, length, offset; + + /* FIXME: Cycle lost behavior should be configurable: lose + * packet, retransmit or terminate.. */ + + p = packet; + payload_index = payload - ctx->base.buffer; + d = ctx->head_descriptor; + tail = ctx->tail_descriptor; + end = ctx->buffer + ISO_BUFFER_SIZE / sizeof(struct descriptor); + + if (p->skip) + z = 1; + else + z = 2; + if (p->header_length > 0) + z++; + + /* Determine the first page the payload isn't contained in. */ + end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT; + if (p->payload_length > 0) + payload_z = end_page - (payload_index >> PAGE_SHIFT); + else + payload_z = 0; + + z += payload_z; + + /* Get header size in number of descriptors. 
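+	 * Each struct descriptor is 16 bytes, so this is simply the
+	 * header length rounded up to 16-byte units.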
*/ + header_z = DIV_ROUND_UP(p->header_length, sizeof *d); + + if (d + z + header_z <= tail) { + goto has_space; + } else if (d > tail && d + z + header_z <= end) { + goto has_space; + } else if (d > tail && ctx->buffer + z + header_z <= tail) { + d = ctx->buffer; + goto has_space; + } + + /* No space in buffer */ + return -1; + + has_space: + memset(d, 0, (z + header_z) * sizeof *d); + d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d; + + if (!p->skip) { + d[0].control = cpu_to_le16(descriptor_key_immediate); + d[0].req_count = cpu_to_le16(8); + + header = (__le32 *) &d[1]; + header[0] = cpu_to_le32(it_header_sy(p->sy) | + it_header_tag(p->tag) | + it_header_tcode(TCODE_STREAM_DATA) | + it_header_channel(ctx->base.channel) | + it_header_speed(ctx->base.speed)); + header[1] = + cpu_to_le32(it_header_data_length(p->header_length + + p->payload_length)); + } + + if (p->header_length > 0) { + d[2].req_count = cpu_to_le16(p->header_length); + d[2].data_address = cpu_to_le32(d_bus + z * sizeof *d); + memcpy(&d[z], p->header, p->header_length); + } + + pd = d + z - payload_z; + payload_end_index = payload_index + p->payload_length; + for (i = 0; i < payload_z; i++) { + page = payload_index >> PAGE_SHIFT; + offset = payload_index & ~PAGE_MASK; + next_page_index = (page + 1) << PAGE_SHIFT; + length = + min(next_page_index, payload_end_index) - payload_index; + pd[i].req_count = cpu_to_le16(length); + pd[i].data_address = cpu_to_le32(ctx->base.pages[page] + offset); + + payload_index += length; + } + + if (z == 2) + last = d; + else + last = d + z - 1; + + if (p->interrupt) + irq = descriptor_irq_always; + else + irq = descriptor_no_irq; + + last->control = cpu_to_le16(descriptor_output_last | + descriptor_status | + descriptor_branch_always | + irq); + + dma_sync_single_for_device(ohci->card.device, ctx->buffer_bus, + ISO_BUFFER_SIZE, DMA_TO_DEVICE); + + ctx->head_descriptor = d + z + header_z; + ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z); + ctx->prev_descriptor = last; + + index = ctx - ohci->it_context_list; + reg_write(ohci, OHCI1394_IsoXmitContextControlSet(index), CONTEXT_WAKE); + flush_writes(ohci); + + return 0; +} + +static struct fw_card_driver ohci_driver = { + .name = ohci_driver_name, + .enable = ohci_enable, + .update_phy_reg = ohci_update_phy_reg, + .set_config_rom = ohci_set_config_rom, + .send_request = ohci_send_request, + .send_response = ohci_send_response, + .enable_phys_dma = ohci_enable_phys_dma, + + .allocate_iso_context = ohci_allocate_iso_context, + .free_iso_context = ohci_free_iso_context, + .queue_iso = ohci_queue_iso, + .send_iso = ohci_send_iso +}; + +static int software_reset(struct fw_ohci *ohci) +{ + int i; + + reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); + + for (i = 0; i < OHCI_LOOP_COUNT; i++) { + if ((reg_read(ohci, OHCI1394_HCControlSet) & + OHCI1394_HCControl_softReset) == 0) + return 0; + msleep(1); + } + + return -EBUSY; +} + +/* ---------- pci subsystem interface ---------- */ + +enum { + CLEANUP_SELF_ID, + CLEANUP_REGISTERS, + CLEANUP_IOMEM, + CLEANUP_DISABLE, + CLEANUP_PUT_CARD, +}; + +static int cleanup(struct fw_ohci *ohci, int stage, int code) +{ + struct pci_dev *dev = to_pci_dev(ohci->card.device); + + switch (stage) { + case CLEANUP_SELF_ID: + dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE, + ohci->self_id_cpu, ohci->self_id_bus); + case CLEANUP_REGISTERS: + kfree(ohci->it_context_list); + kfree(ohci->ir_context_list); + pci_iounmap(dev, ohci->registers); + case CLEANUP_IOMEM: + 
pci_release_region(dev, 0); + case CLEANUP_DISABLE: + pci_disable_device(dev); + case CLEANUP_PUT_CARD: + fw_card_put(&ohci->card); + } + + return code; +} + +static int __devinit +pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) +{ + struct fw_ohci *ohci; + u32 bus_options, max_receive, link_speed; + u64 guid; + int error_code; + size_t size; + + ohci = kzalloc(sizeof *ohci, GFP_KERNEL); + if (ohci == NULL) { + fw_error("Could not malloc fw_ohci data.\n"); + return -ENOMEM; + } + + fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); + + if (pci_enable_device(dev)) { + fw_error("Failed to enable OHCI hardware.\n"); + return cleanup(ohci, CLEANUP_PUT_CARD, -ENODEV); + } + + pci_set_master(dev); + pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); + pci_set_drvdata(dev, ohci); + + spin_lock_init(&ohci->lock); + + tasklet_init(&ohci->bus_reset_tasklet, + bus_reset_tasklet, (unsigned long)ohci); + + if (pci_request_region(dev, 0, ohci_driver_name)) { + fw_error("MMIO resource unavailable\n"); + return cleanup(ohci, CLEANUP_DISABLE, -EBUSY); + } + + ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); + if (ohci->registers == NULL) { + fw_error("Failed to remap registers\n"); + return cleanup(ohci, CLEANUP_IOMEM, -ENXIO); + } + + if (software_reset(ohci)) { + fw_error("Failed to reset ohci card.\n"); + return cleanup(ohci, CLEANUP_REGISTERS, -EBUSY); + } + + /* Now enable LPS, which we need in order to start accessing + * most of the registers. In fact, on some cards (ALI M5251), + * accessing registers in the SClk domain without LPS enabled + * will lock up the machine. Wait 50msec to make sure we have + * full link enabled. */ + reg_write(ohci, OHCI1394_HCControlSet, + OHCI1394_HCControl_LPS | + OHCI1394_HCControl_postedWriteEnable); + flush_writes(ohci); + msleep(50); + + reg_write(ohci, OHCI1394_HCControlClear, + OHCI1394_HCControl_noByteSwapData); + + reg_write(ohci, OHCI1394_LinkControlSet, + OHCI1394_LinkControl_rcvSelfID | + OHCI1394_LinkControl_cycleTimerEnable | + OHCI1394_LinkControl_cycleMaster); + + ar_context_init(&ohci->ar_request_ctx, ohci, + OHCI1394_AsReqRcvContextControlSet); + + ar_context_init(&ohci->ar_response_ctx, ohci, + OHCI1394_AsRspRcvContextControlSet); + + at_context_init(&ohci->at_request_ctx, ohci, + OHCI1394_AsReqTrContextControlSet); + + at_context_init(&ohci->at_response_ctx, ohci, + OHCI1394_AsRspTrContextControlSet); + + reg_write(ohci, OHCI1394_ATRetries, + OHCI1394_MAX_AT_REQ_RETRIES | + (OHCI1394_MAX_AT_RESP_RETRIES << 4) | + (OHCI1394_MAX_PHYS_RESP_RETRIES << 8)); + + reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); + ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); + reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); + size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask); + ohci->it_context_list = kzalloc(size, GFP_KERNEL); + + reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); + ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); + reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); + size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); + ohci->ir_context_list = kzalloc(size, GFP_KERNEL); + + if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { + fw_error("Out of memory for it/ir contexts.\n"); + return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM); + } + + /* self-id dma buffer allocation */ + ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device, + SELF_ID_BUF_SIZE, + &ohci->self_id_bus, + GFP_KERNEL); + if (ohci->self_id_cpu == NULL) 
{ + fw_error("Out of memory for self ID buffer.\n"); + return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM); + } + + reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); + reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); + reg_write(ohci, OHCI1394_IntEventClear, ~0); + reg_write(ohci, OHCI1394_IntMaskClear, ~0); + reg_write(ohci, OHCI1394_IntMaskSet, + OHCI1394_selfIDComplete | + OHCI1394_RQPkt | OHCI1394_RSPkt | + OHCI1394_reqTxComplete | OHCI1394_respTxComplete | + OHCI1394_isochRx | OHCI1394_isochTx | + OHCI1394_masterIntEnable); + + bus_options = reg_read(ohci, OHCI1394_BusOptions); + max_receive = (bus_options >> 12) & 0xf; + link_speed = bus_options & 0x7; + guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | + reg_read(ohci, OHCI1394_GUIDLo); + + error_code = fw_card_add(&ohci->card, max_receive, link_speed, guid); + if (error_code < 0) + return cleanup(ohci, CLEANUP_SELF_ID, error_code); + + fw_notify("Added fw-ohci device %s.\n", dev->dev.bus_id); + + return 0; +} + +static void pci_remove(struct pci_dev *dev) +{ + struct fw_ohci *ohci; + + ohci = pci_get_drvdata(dev); + reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_masterIntEnable); + fw_core_remove_card(&ohci->card); + + /* FIXME: Fail all pending packets here, now that the upper + * layers can't queue any more. */ + + software_reset(ohci); + free_irq(dev->irq, ohci); + cleanup(ohci, CLEANUP_SELF_ID, 0); + + fw_notify("Removed fw-ohci device.\n"); +} + +static struct pci_device_id pci_table[] = { + { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, + { } +}; + +MODULE_DEVICE_TABLE(pci, pci_table); + +static struct pci_driver fw_ohci_pci_driver = { + .name = ohci_driver_name, + .id_table = pci_table, + .probe = pci_probe, + .remove = pci_remove, +}; + +MODULE_AUTHOR("Kristian Hoegsberg "); +MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); +MODULE_LICENSE("GPL"); + +static int __init fw_ohci_init(void) +{ + return pci_register_driver(&fw_ohci_pci_driver); +} + +static void __exit fw_ohci_cleanup(void) +{ + pci_unregister_driver(&fw_ohci_pci_driver); +} + +module_init(fw_ohci_init); +module_exit(fw_ohci_cleanup); diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/fw-ohci.h new file mode 100644 index 0000000..35e2a75 --- /dev/null +++ b/drivers/firewire/fw-ohci.h @@ -0,0 +1,152 @@ +#ifndef __fw_ohci_h +#define __fw_ohci_h + +/* OHCI register map */ + +#define OHCI1394_Version 0x000 +#define OHCI1394_GUID_ROM 0x004 +#define OHCI1394_ATRetries 0x008 +#define OHCI1394_CSRData 0x00C +#define OHCI1394_CSRCompareData 0x010 +#define OHCI1394_CSRControl 0x014 +#define OHCI1394_ConfigROMhdr 0x018 +#define OHCI1394_BusID 0x01C +#define OHCI1394_BusOptions 0x020 +#define OHCI1394_GUIDHi 0x024 +#define OHCI1394_GUIDLo 0x028 +#define OHCI1394_ConfigROMmap 0x034 +#define OHCI1394_PostedWriteAddressLo 0x038 +#define OHCI1394_PostedWriteAddressHi 0x03C +#define OHCI1394_VendorID 0x040 +#define OHCI1394_HCControlSet 0x050 +#define OHCI1394_HCControlClear 0x054 +#define OHCI1394_HCControl_BIBimageValid 0x80000000 +#define OHCI1394_HCControl_noByteSwapData 0x40000000 +#define OHCI1394_HCControl_programPhyEnable 0x00800000 +#define OHCI1394_HCControl_aPhyEnhanceEnable 0x00400000 +#define OHCI1394_HCControl_LPS 0x00080000 +#define OHCI1394_HCControl_postedWriteEnable 0x00040000 +#define OHCI1394_HCControl_linkEnable 0x00020000 +#define OHCI1394_HCControl_softReset 0x00010000 +#define OHCI1394_SelfIDBuffer 0x064 +#define OHCI1394_SelfIDCount 0x068 +#define OHCI1394_IRMultiChanMaskHiSet 0x070 +#define 
OHCI1394_IRMultiChanMaskHiClear 0x074 +#define OHCI1394_IRMultiChanMaskLoSet 0x078 +#define OHCI1394_IRMultiChanMaskLoClear 0x07C +#define OHCI1394_IntEventSet 0x080 +#define OHCI1394_IntEventClear 0x084 +#define OHCI1394_IntMaskSet 0x088 +#define OHCI1394_IntMaskClear 0x08C +#define OHCI1394_IsoXmitIntEventSet 0x090 +#define OHCI1394_IsoXmitIntEventClear 0x094 +#define OHCI1394_IsoXmitIntMaskSet 0x098 +#define OHCI1394_IsoXmitIntMaskClear 0x09C +#define OHCI1394_IsoRecvIntEventSet 0x0A0 +#define OHCI1394_IsoRecvIntEventClear 0x0A4 +#define OHCI1394_IsoRecvIntMaskSet 0x0A8 +#define OHCI1394_IsoRecvIntMaskClear 0x0AC +#define OHCI1394_InitialBandwidthAvailable 0x0B0 +#define OHCI1394_InitialChannelsAvailableHi 0x0B4 +#define OHCI1394_InitialChannelsAvailableLo 0x0B8 +#define OHCI1394_FairnessControl 0x0DC +#define OHCI1394_LinkControlSet 0x0E0 +#define OHCI1394_LinkControlClear 0x0E4 +#define OHCI1394_LinkControl_rcvSelfID (1 << 9) +#define OHCI1394_LinkControl_rcvPhyPkt (1 << 10) +#define OHCI1394_LinkControl_cycleTimerEnable (1 << 20) +#define OHCI1394_LinkControl_cycleMaster (1 << 21) +#define OHCI1394_LinkControl_cycleSource (1 << 22) +#define OHCI1394_NodeID 0x0E8 +#define OHCI1394_NodeID_idValid 0x80000000 +#define OHCI1394_PhyControl 0x0EC +#define OHCI1394_PhyControl_Read(addr) (((addr) << 8) | 0x00008000) +#define OHCI1394_PhyControl_ReadDone 0x80000000 +#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16) +#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000) +#define OHCI1394_PhyControl_WriteDone 0x00004000 +#define OHCI1394_IsochronousCycleTimer 0x0F0 +#define OHCI1394_AsReqFilterHiSet 0x100 +#define OHCI1394_AsReqFilterHiClear 0x104 +#define OHCI1394_AsReqFilterLoSet 0x108 +#define OHCI1394_AsReqFilterLoClear 0x10C +#define OHCI1394_PhyReqFilterHiSet 0x110 +#define OHCI1394_PhyReqFilterHiClear 0x114 +#define OHCI1394_PhyReqFilterLoSet 0x118 +#define OHCI1394_PhyReqFilterLoClear 0x11C +#define OHCI1394_PhyUpperBound 0x120 + +#define OHCI1394_AsReqTrContextBase 0x180 +#define OHCI1394_AsReqTrContextControlSet 0x180 +#define OHCI1394_AsReqTrContextControlClear 0x184 +#define OHCI1394_AsReqTrCommandPtr 0x18C + +#define OHCI1394_AsRspTrContextBase 0x1A0 +#define OHCI1394_AsRspTrContextControlSet 0x1A0 +#define OHCI1394_AsRspTrContextControlClear 0x1A4 +#define OHCI1394_AsRspTrCommandPtr 0x1AC + +#define OHCI1394_AsReqRcvContextBase 0x1C0 +#define OHCI1394_AsReqRcvContextControlSet 0x1C0 +#define OHCI1394_AsReqRcvContextControlClear 0x1C4 +#define OHCI1394_AsReqRcvCommandPtr 0x1CC + +#define OHCI1394_AsRspRcvContextBase 0x1E0 +#define OHCI1394_AsRspRcvContextControlSet 0x1E0 +#define OHCI1394_AsRspRcvContextControlClear 0x1E4 +#define OHCI1394_AsRspRcvCommandPtr 0x1EC + +/* Isochronous transmit registers */ +#define OHCI1394_IsoXmitContextBase(n) (0x200 + 16 * (n)) +#define OHCI1394_IsoXmitContextControlSet(n) (0x200 + 16 * (n)) +#define OHCI1394_IsoXmitContextControlClear(n) (0x204 + 16 * (n)) +#define OHCI1394_IsoXmitCommandPtr(n) (0x20C + 16 * (n)) + +/* Isochronous receive registers */ +#define OHCI1394_IsoRcvContextControlSet(n) (0x400 + 32 * (n)) +#define OHCI1394_IsoRcvContextControlClear(n) (0x404 + 32 * (n)) +#define OHCI1394_IsoRcvCommandPtr(n) (0x40C + 32 * (n)) +#define OHCI1394_IsoRcvContextMatch(n) (0x410 + 32 * (n)) + +/* Interrupts Mask/Events */ +#define OHCI1394_reqTxComplete 0x00000001 +#define OHCI1394_respTxComplete 0x00000002 +#define OHCI1394_ARRQ 0x00000004 +#define OHCI1394_ARRS 0x00000008 +#define OHCI1394_RQPkt 
0x00000010 +#define OHCI1394_RSPkt 0x00000020 +#define OHCI1394_isochTx 0x00000040 +#define OHCI1394_isochRx 0x00000080 +#define OHCI1394_postedWriteErr 0x00000100 +#define OHCI1394_lockRespErr 0x00000200 +#define OHCI1394_selfIDComplete 0x00010000 +#define OHCI1394_busReset 0x00020000 +#define OHCI1394_phy 0x00080000 +#define OHCI1394_cycleSynch 0x00100000 +#define OHCI1394_cycle64Seconds 0x00200000 +#define OHCI1394_cycleLost 0x00400000 +#define OHCI1394_cycleInconsistent 0x00800000 +#define OHCI1394_unrecoverableError 0x01000000 +#define OHCI1394_cycleTooLong 0x02000000 +#define OHCI1394_phyRegRcvd 0x04000000 +#define OHCI1394_masterIntEnable 0x80000000 + +#define OHCI1394_evt_no_status 0x0 +#define OHCI1394_evt_long_packet 0x2 +#define OHCI1394_evt_missing_ack 0x3 +#define OHCI1394_evt_underrun 0x4 +#define OHCI1394_evt_overrun 0x5 +#define OHCI1394_evt_descriptor_read 0x6 +#define OHCI1394_evt_data_read 0x7 +#define OHCI1394_evt_data_write 0x8 +#define OHCI1394_evt_bus_reset 0x9 +#define OHCI1394_evt_timeout 0xa +#define OHCI1394_evt_tcode_err 0xb +#define OHCI1394_evt_reserved_b 0xc +#define OHCI1394_evt_reserved_c 0xd +#define OHCI1394_evt_unknown 0xe +#define OHCI1394_evt_flushed 0xf + +#define OHCI1394_phy_tcode 0xe + +#endif /* __fw_ohci_h */ -- cgit v0.10.2 From 9ba136d0fe5a3dd33533b4a2a21156aa22f80ebe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 19 Dec 2006 19:58:40 -0500 Subject: firewire: Add SBP-2 protocol driver for storage devices. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Kristian Høgsberg Signed-off-by: Stefan Richter diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index b386334..bfab4b3 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig @@ -31,4 +31,16 @@ config FW_OHCI To compile this driver as a module, say M here: the module will be called fw-ohci. +config FW_SBP2 + tristate "Support for storage devices (SBP-2 protocol driver)" + depends on FW && SCSI + help + This option enables you to use SBP-2 devices connected to an + firewire bus. SBP-2 devices include storage devices like + harddisks and DVD drives, also some other FireWire devices + like scanners. + + You should also enable support for disks, CD-ROMs, etc. in the SCSI + configuration section. + endmenu diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile index add3b98..b955c99 100644 --- a/drivers/firewire/Makefile +++ b/drivers/firewire/Makefile @@ -7,3 +7,4 @@ fw-core-objs := fw-card.o fw-topology.o fw-transaction.o fw-iso.o \ obj-$(CONFIG_FW) += fw-core.o obj-$(CONFIG_FW_OHCI) += fw-ohci.o +obj-$(CONFIG_FW_SBP2) += fw-sbp2.o \ No newline at end of file diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c new file mode 100644 index 0000000..2756e0c --- /dev/null +++ b/drivers/firewire/fw-sbp2.c @@ -0,0 +1,1073 @@ +/* -*- c-basic-offset: 8 -*- + * fw-sbp2.c -- SBP2 driver (SCSI over IEEE1394) + * + * Copyright (C) 2005-2006 Kristian Hoegsberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "fw-transaction.h" +#include "fw-topology.h" +#include "fw-device.h" + +/* I don't know why the SCSI stack doesn't define something like this... */ +typedef void (*scsi_done_fn_t) (struct scsi_cmnd *); + +static const char sbp2_driver_name[] = "sbp2"; + +struct sbp2_device { + struct fw_unit *unit; + struct fw_address_handler address_handler; + struct list_head orb_list; + u64 management_agent_address; + u64 command_block_agent_address; + u32 workarounds; + int login_id; + + /* We cache these addresses and only update them once we've + * logged in or reconnected to the sbp2 device. That way, any + * IO to the device will automatically fail and get retried if + * it happens in a window where the device is not ready to + * handle it (e.g. after a bus reset but before we reconnect). */ + int node_id; + int address_high; + int generation; + + struct work_struct work; + struct Scsi_Host *scsi_host; +}; + +#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 +#define SBP2_MAX_SECTORS 255 /* Max sectors supported */ +#define SBP2_MAX_CMDS 8 /* This should be safe */ + +#define SBP2_ORB_NULL 0x80000000 + +#define SBP2_DIRECTION_TO_MEDIA 0x0 +#define SBP2_DIRECTION_FROM_MEDIA 0x1 + +/* Unit directory keys */ +#define SBP2_COMMAND_SET_SPECIFIER 0x38 +#define SBP2_COMMAND_SET 0x39 +#define SBP2_COMMAND_SET_REVISION 0x3b +#define SBP2_FIRMWARE_REVISION 0x3c + +/* Flags for detected oddities and brokeness */ +#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1 +#define SBP2_WORKAROUND_INQUIRY_36 0x2 +#define SBP2_WORKAROUND_MODE_SENSE_8 0x4 +#define SBP2_WORKAROUND_FIX_CAPACITY 0x8 +#define SBP2_WORKAROUND_OVERRIDE 0x100 + +/* Management orb opcodes */ +#define SBP2_LOGIN_REQUEST 0x0 +#define SBP2_QUERY_LOGINS_REQUEST 0x1 +#define SBP2_RECONNECT_REQUEST 0x3 +#define SBP2_SET_PASSWORD_REQUEST 0x4 +#define SBP2_LOGOUT_REQUEST 0x7 +#define SBP2_ABORT_TASK_REQUEST 0xb +#define SBP2_ABORT_TASK_SET 0xc +#define SBP2_LOGICAL_UNIT_RESET 0xe +#define SBP2_TARGET_RESET_REQUEST 0xf + +/* Offsets for command block agent registers */ +#define SBP2_AGENT_STATE 0x00 +#define SBP2_AGENT_RESET 0x04 +#define SBP2_ORB_POINTER 0x08 +#define SBP2_DOORBELL 0x10 +#define SBP2_UNSOLICITED_STATUS_ENABLE 0x14 + +/* Status write response codes */ +#define SBP2_STATUS_REQUEST_COMPLETE 0x0 +#define SBP2_STATUS_TRANSPORT_FAILURE 0x1 +#define SBP2_STATUS_ILLEGAL_REQUEST 0x2 +#define SBP2_STATUS_VENDOR_DEPENDENT 0x3 + +#define status_get_orb_high(v) ((v).status & 0xffff) +#define status_get_sbp_status(v) (((v).status >> 16) & 0xff) +#define status_get_len(v) (((v).status >> 24) & 0x07) +#define status_get_dead(v) (((v).status >> 27) & 0x01) +#define status_get_response(v) (((v).status >> 28) & 0x03) +#define status_get_source(v) (((v).status >> 30) & 0x03) +#define status_get_orb_low(v) ((v).orb_low) +#define status_get_data(v) ((v).data) + +struct sbp2_status { + u32 status; + u32 orb_low; + u8 data[24]; +}; + +struct sbp2_pointer { + u32 high; + u32 low; +}; + +struct sbp2_orb { + struct fw_transaction t; + dma_addr_t request_bus; + int rcode; + struct sbp2_pointer pointer; + void (*callback) (struct sbp2_orb * orb, struct sbp2_status * status); + struct list_head link; +}; + +#define management_orb_lun(v) ((v)) +#define 
management_orb_function(v) ((v) << 16) +#define management_orb_reconnect(v) ((v) << 20) +#define management_orb_exclusive ((1) << 28) +#define management_orb_request_format(v) ((v) << 29) +#define management_orb_notify ((1) << 31) + +#define management_orb_response_length(v) ((v)) +#define management_orb_password_length(v) ((v) << 16) + +struct sbp2_management_orb { + struct sbp2_orb base; + struct { + struct sbp2_pointer password; + struct sbp2_pointer response; + u32 misc; + u32 length; + struct sbp2_pointer status_fifo; + } request; + __be32 response[4]; + dma_addr_t response_bus; + struct completion done; + struct sbp2_status status; +}; + +#define login_response_get_login_id(v) ((v).misc & 0xffff) +#define login_response_get_length(v) (((v).misc >> 16) & 0xffff) + +struct sbp2_login_response { + u32 misc; + struct sbp2_pointer command_block_agent; + u32 reconnect_hold; +}; + +#define command_orb_data_size(v) ((v)) +#define command_orb_page_size(v) ((v) << 16) +#define command_orb_page_table_present ((1) << 19) +#define command_orb_max_payload(v) ((v) << 20) +#define command_orb_speed(v) ((v) << 24) +#define command_orb_direction(v) ((v) << 27) +#define command_orb_request_format(v) ((v) << 29) +#define command_orb_notify ((1) << 31) + +struct sbp2_command_orb { + struct sbp2_orb base; + struct { + struct sbp2_pointer next; + struct sbp2_pointer data_descriptor; + u32 misc; + u8 command_block[12]; + } request; + struct scsi_cmnd *cmd; + scsi_done_fn_t done; + struct fw_unit *unit; + + struct sbp2_pointer page_table[SG_ALL]; + dma_addr_t page_table_bus; + dma_addr_t request_buffer_bus; +}; + +/* + * List of devices with known bugs. + * + * The firmware_revision field, masked with 0xffff00, is the best + * indicator for the type of bridge chip of a device. It yields a few + * false positives but this did not break correctly behaving devices + * so far. We use ~0 as a wildcard, since the 24 bit values we get + * from the config rom can never match that. + */ +static const struct { + u32 firmware_revision; + u32 model; + unsigned workarounds; +} sbp2_workarounds_table[] = { + /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { + .firmware_revision = 0x002800, + .model = 0x001010, + .workarounds = SBP2_WORKAROUND_INQUIRY_36 | + SBP2_WORKAROUND_MODE_SENSE_8, + }, + /* Initio bridges, actually only needed for some older ones */ { + .firmware_revision = 0x000200, + .model = ~0, + .workarounds = SBP2_WORKAROUND_INQUIRY_36, + }, + /* Symbios bridge */ { + .firmware_revision = 0xa0b800, + .model = ~0, + .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, + }, + /* There are iPods (2nd gen, 3rd gen) with model_id == 0, but + * these iPods do not feature the read_capacity bug according + * to one report. Read_capacity behaviour as well as model_id + * could change due to Apple-supplied firmware updates though. */ + /* iPod 4th generation. 
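+ * All three iPod entries below share firmware_revision 0x0a2700 and are
+ * distinguished only by their model IDs.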
*/ { + .firmware_revision = 0x0a2700, + .model = 0x000021, + .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, + }, + /* iPod mini */ { + .firmware_revision = 0x0a2700, + .model = 0x000023, + .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, + }, + /* iPod Photo */ { + .firmware_revision = 0x0a2700, + .model = 0x00007e, + .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, + } +}; + +static void +sbp2_status_write(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, + int generation, int speed, + unsigned long long offset, + void *payload, size_t length, void *callback_data) +{ + struct sbp2_device *sd = callback_data; + struct sbp2_orb *orb; + struct sbp2_status status; + size_t header_size; + unsigned long flags; + + if (tcode != TCODE_WRITE_BLOCK_REQUEST || + length == 0 || length > sizeof status) { + fw_send_response(card, request, RCODE_TYPE_ERROR); + return; + } + + header_size = min(length, 2 * sizeof(u32)); + fw_memcpy_from_be32(&status, payload, header_size); + if (length > header_size) + memcpy(status.data, payload + 8, length - header_size); + if (status_get_source(status) == 2 || status_get_source(status) == 3) { + fw_notify("non-orb related status write, not handled\n"); + fw_send_response(card, request, RCODE_COMPLETE); + return; + } + + /* Lookup the orb corresponding to this status write. */ + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry(orb, &sd->orb_list, link) { + if (status_get_orb_high(status) == 0 && + status_get_orb_low(status) == orb->request_bus) { + list_del(&orb->link); + break; + } + } + spin_unlock_irqrestore(&card->lock, flags); + + if (&orb->link != &sd->orb_list) + orb->callback(orb, &status); + else + fw_error("status write for unknown orb\n"); + + fw_send_response(card, request, RCODE_COMPLETE); +} + +static void +complete_transaction(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) +{ + struct sbp2_orb *orb = data; + unsigned long flags; + + orb->rcode = rcode; + if (rcode != RCODE_COMPLETE) { + spin_lock_irqsave(&card->lock, flags); + list_del(&orb->link); + spin_unlock_irqrestore(&card->lock, flags); + orb->callback(orb, NULL); + } +} + +static void +sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit, + int node_id, int generation, u64 offset) +{ + struct fw_device *device = fw_device(unit->device.parent); + struct sbp2_device *sd = unit->device.driver_data; + unsigned long flags; + + orb->pointer.high = 0; + orb->pointer.low = orb->request_bus; + fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof orb->pointer); + + spin_lock_irqsave(&device->card->lock, flags); + list_add_tail(&orb->link, &sd->orb_list); + spin_unlock_irqrestore(&device->card->lock, flags); + + fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, + node_id | LOCAL_BUS, generation, + device->node->max_speed, offset, + &orb->pointer, sizeof orb->pointer, + complete_transaction, orb); +} + +static void sbp2_cancel_orbs(struct fw_unit *unit) +{ + struct fw_device *device = fw_device(unit->device.parent); + struct sbp2_device *sd = unit->device.driver_data; + struct sbp2_orb *orb, *next; + struct list_head list; + unsigned long flags; + + INIT_LIST_HEAD(&list); + spin_lock_irqsave(&device->card->lock, flags); + list_splice_init(&sd->orb_list, &list); + spin_unlock_irqrestore(&device->card->lock, flags); + + list_for_each_entry_safe(orb, next, &list, link) { + orb->rcode = RCODE_CANCELLED; + orb->callback(orb, NULL); + } +} + +static void +complete_management_orb(struct sbp2_orb *base_orb, struct 
sbp2_status *status) +{ + struct sbp2_management_orb *orb = + (struct sbp2_management_orb *)base_orb; + + if (status) + memcpy(&orb->status, status, sizeof *status); + complete(&orb->done); +} + +static int +sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation, + int function, int lun, void *response) +{ + struct fw_device *device = fw_device(unit->device.parent); + struct sbp2_device *sd = unit->device.driver_data; + struct sbp2_management_orb *orb; + unsigned long timeout; + int retval = -ENOMEM; + + orb = kzalloc(sizeof *orb, GFP_ATOMIC); + if (orb == NULL) + return -ENOMEM; + + /* The sbp2 device is going to send a block read request to + * read out the request from host memory, so map it for + * dma. */ + orb->base.request_bus = + dma_map_single(device->card->device, &orb->request, + sizeof orb->request, DMA_TO_DEVICE); + if (orb->base.request_bus == 0) + goto out; + + orb->response_bus = + dma_map_single(device->card->device, &orb->response, + sizeof orb->response, DMA_FROM_DEVICE); + if (orb->response_bus == 0) + goto out; + + orb->request.response.high = 0; + orb->request.response.low = orb->response_bus; + + orb->request.misc = + management_orb_notify | + management_orb_function(function) | + management_orb_lun(lun); + orb->request.length = + management_orb_response_length(sizeof orb->response); + + orb->request.status_fifo.high = sd->address_handler.offset >> 32; + orb->request.status_fifo.low = sd->address_handler.offset; + + /* FIXME: Yeah, ok this isn't elegant, we hardwire exclusive + * login and 1 second reconnect time. The reconnect setting + * is probably fine, but the exclusive login should be an + * option. */ + if (function == SBP2_LOGIN_REQUEST) { + orb->request.misc |= + management_orb_exclusive | + management_orb_reconnect(0); + } + + fw_memcpy_to_be32(&orb->request, &orb->request, sizeof orb->request); + + init_completion(&orb->done); + orb->base.callback = complete_management_orb; + sbp2_send_orb(&orb->base, unit, + node_id, generation, sd->management_agent_address); + + timeout = wait_for_completion_timeout(&orb->done, 10 * HZ); + + /* FIXME: Handle bus reset race here. 
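+ * A bus reset while we wait invalidates the generation this ORB was sent
+ * with; the write then fails with a non-complete rcode (or the status
+ * write never arrives and we time out), and the login/reconnect would
+ * need to be retried with the new generation and node ID.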
*/ + + retval = -EIO; + if (orb->base.rcode != RCODE_COMPLETE) { + fw_error("management write failed, rcode 0x%02x\n", + orb->base.rcode); + goto out; + } + + if (timeout == 0) { + fw_error("orb reply timed out, rcode=0x%02x\n", + orb->base.rcode); + goto out; + } + + if (status_get_response(orb->status) != 0 || + status_get_sbp_status(orb->status) != 0) { + fw_error("error status: %d:%d\n", + status_get_response(orb->status), + status_get_sbp_status(orb->status)); + goto out; + } + + retval = 0; + out: + dma_unmap_single(device->card->device, orb->base.request_bus, + sizeof orb->request, DMA_TO_DEVICE); + dma_unmap_single(device->card->device, orb->response_bus, + sizeof orb->response, DMA_FROM_DEVICE); + + if (response) + fw_memcpy_from_be32(response, + orb->response, sizeof orb->response); + kfree(orb); + + return retval; +} + +static void +complete_agent_reset_write(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) +{ + struct fw_transaction *t = data; + + fw_notify("agent reset write rcode=%d\n", rcode); + kfree(t); +} + +static int sbp2_agent_reset(struct fw_unit *unit) +{ + struct fw_device *device = fw_device(unit->device.parent); + struct sbp2_device *sd = unit->device.driver_data; + struct fw_transaction *t; + static u32 zero; + + t = kzalloc(sizeof *t, GFP_ATOMIC); + if (t == NULL) + return -ENOMEM; + + fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, + sd->node_id | LOCAL_BUS, sd->generation, SCODE_400, + sd->command_block_agent_address + SBP2_AGENT_RESET, + &zero, sizeof zero, complete_agent_reset_write, t); + + return 0; +} + +static int add_scsi_devices(struct fw_unit *unit); +static void remove_scsi_devices(struct fw_unit *unit); + +static int sbp2_probe(struct device *dev) +{ + struct fw_unit *unit = fw_unit(dev); + struct fw_device *device = fw_device(unit->device.parent); + struct sbp2_device *sd; + struct fw_csr_iterator ci; + int i, key, value, lun, retval; + int node_id, generation, local_node_id; + struct sbp2_login_response response; + u32 model, firmware_revision; + + sd = kzalloc(sizeof *sd, GFP_KERNEL); + if (sd == NULL) + return -ENOMEM; + + unit->device.driver_data = sd; + sd->unit = unit; + INIT_LIST_HEAD(&sd->orb_list); + + sd->address_handler.length = 0x100; + sd->address_handler.address_callback = sbp2_status_write; + sd->address_handler.callback_data = sd; + + if (fw_core_add_address_handler(&sd->address_handler, + &fw_high_memory_region) < 0) { + kfree(sd); + return -EBUSY; + } + + if (fw_device_enable_phys_dma(device) < 0) { + fw_core_remove_address_handler(&sd->address_handler); + kfree(sd); + return -EBUSY; + } + + /* Scan unit directory to get management agent address, + * firmware revison and model. Initialize firmware_revision + * and model to values that wont match anything in our table. 
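+ * 0xff000000 cannot match any masked 24-bit firmware_revision in
+ * sbp2_workarounds_table, so no workaround is applied unless the unit
+ * directory really provides matching values.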
*/ + firmware_revision = 0xff000000; + model = 0xff000000; + fw_csr_iterator_init(&ci, unit->directory); + while (fw_csr_iterator_next(&ci, &key, &value)) { + switch (key) { + case CSR_DEPENDENT_INFO | CSR_OFFSET: + sd->management_agent_address = + 0xfffff0000000ULL + 4 * value; + break; + case SBP2_FIRMWARE_REVISION: + firmware_revision = value; + break; + case CSR_MODEL: + model = value; + break; + } + } + + for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { + if (sbp2_workarounds_table[i].firmware_revision != + (firmware_revision & 0xffffff00)) + continue; + if (sbp2_workarounds_table[i].model != model && + sbp2_workarounds_table[i].model != ~0) + continue; + sd->workarounds |= sbp2_workarounds_table[i].workarounds; + break; + } + + if (sd->workarounds) + fw_notify("Workarounds for node %s: 0x%x " + "(firmware_revision 0x%06x, model_id 0x%06x)\n", + unit->device.bus_id, + sd->workarounds, firmware_revision, model); + + /* FIXME: Make this work for multi-lun devices. */ + lun = 0; + + generation = device->card->generation; + node_id = device->node->node_id; + local_node_id = device->card->local_node->node_id; + + /* FIXME: We should probably do this from a keventd callback + * and handle retries by rescheduling the work. */ + if (sbp2_send_management_orb(unit, node_id, generation, + SBP2_LOGIN_REQUEST, lun, &response) < 0) { + fw_core_remove_address_handler(&sd->address_handler); + kfree(sd); + return -EBUSY; + } + + sd->generation = generation; + sd->node_id = node_id; + sd->address_high = (LOCAL_BUS | local_node_id) << 16; + + /* Get command block agent offset and login id. */ + sd->command_block_agent_address = + ((u64) response.command_block_agent.high << 32) | + response.command_block_agent.low; + sd->login_id = login_response_get_login_id(response); + + fw_notify("logged in to sbp2 unit %s\n", unit->device.bus_id); + fw_notify(" - management_agent_address: 0x%012llx\n", + (unsigned long long) sd->management_agent_address); + fw_notify(" - command_block_agent_address: 0x%012llx\n", + (unsigned long long) sd->command_block_agent_address); + fw_notify(" - status write address: 0x%012llx\n", + (unsigned long long) sd->address_handler.offset); + +#if 0 + /* FIXME: The linux1394 sbp2 does this last step. 
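+ * In the older ieee1394 sbp2 driver that call writes the target's
+ * BUSY_TIMEOUT CSR so that busied transactions get retried; that step
+ * has not been ported to this driver yet, hence the #if 0.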
*/ + sbp2_set_busy_timeout(scsi_id); +#endif + + sbp2_agent_reset(unit); + + retval = add_scsi_devices(unit); + if (retval < 0) { + sbp2_send_management_orb(unit, sd->node_id, sd->generation, + SBP2_LOGOUT_REQUEST, sd->login_id, + NULL); + fw_core_remove_address_handler(&sd->address_handler); + kfree(sd); + return retval; + } + + return 0; +} + +static int sbp2_remove(struct device *dev) +{ + struct fw_unit *unit = fw_unit(dev); + struct sbp2_device *sd = unit->device.driver_data; + + sbp2_send_management_orb(unit, sd->node_id, sd->generation, + SBP2_LOGOUT_REQUEST, sd->login_id, NULL); + + remove_scsi_devices(unit); + + fw_core_remove_address_handler(&sd->address_handler); + kfree(sd); + + fw_notify("removed sbp2 unit %s\n", dev->bus_id); + + return 0; +} + +static void sbp2_reconnect(struct work_struct *work) +{ + struct sbp2_device *sd = container_of(work, struct sbp2_device, work); + struct fw_unit *unit = sd->unit; + struct fw_device *device = fw_device(unit->device.parent); + int generation, node_id, local_node_id; + + fw_notify("in sbp2_reconnect, reconnecting to unit %s\n", + unit->device.bus_id); + + generation = device->card->generation; + node_id = device->node->node_id; + local_node_id = device->card->local_node->node_id; + + sbp2_send_management_orb(unit, node_id, generation, + SBP2_RECONNECT_REQUEST, sd->login_id, NULL); + + /* FIXME: handle reconnect failures. */ + + sbp2_cancel_orbs(unit); + + sd->generation = generation; + sd->node_id = node_id; + sd->address_high = (LOCAL_BUS | local_node_id) << 16; +} + +static void sbp2_update(struct fw_unit *unit) +{ + struct fw_device *device = fw_device(unit->device.parent); + struct sbp2_device *sd = unit->device.driver_data; + + fw_device_enable_phys_dma(device); + + INIT_WORK(&sd->work, sbp2_reconnect); + schedule_work(&sd->work); +} + +#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e +#define SBP2_SW_VERSION_ENTRY 0x00010483 + +static struct fw_device_id sbp2_id_table[] = { + { + .match_flags = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION, + .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY, + .version = SBP2_SW_VERSION_ENTRY + }, + { } +}; + +static struct fw_driver sbp2_driver = { + .driver = { + .owner = THIS_MODULE, + .name = sbp2_driver_name, + .bus = &fw_bus_type, + .probe = sbp2_probe, + .remove = sbp2_remove, + }, + .update = sbp2_update, + .id_table = sbp2_id_table, +}; + +static unsigned int sbp2_status_to_sense_data(u8 * sbp2_status, u8 * sense_data) +{ + sense_data[0] = 0x70; + sense_data[1] = 0x0; + sense_data[2] = sbp2_status[1]; + sense_data[3] = sbp2_status[4]; + sense_data[4] = sbp2_status[5]; + sense_data[5] = sbp2_status[6]; + sense_data[6] = sbp2_status[7]; + sense_data[7] = 10; + sense_data[8] = sbp2_status[8]; + sense_data[9] = sbp2_status[9]; + sense_data[10] = sbp2_status[10]; + sense_data[11] = sbp2_status[11]; + sense_data[12] = sbp2_status[2]; + sense_data[13] = sbp2_status[3]; + sense_data[14] = sbp2_status[12]; + sense_data[15] = sbp2_status[13]; + + switch (sbp2_status[0] & 0x3f) { + case SAM_STAT_GOOD: + return DID_OK; + + case SAM_STAT_CHECK_CONDITION: + /* return CHECK_CONDITION << 1 | DID_OK << 16; */ + return DID_OK; + + case SAM_STAT_BUSY: + return DID_BUS_BUSY; + + case SAM_STAT_CONDITION_MET: + case SAM_STAT_RESERVATION_CONFLICT: + case SAM_STAT_COMMAND_TERMINATED: + default: + return DID_ERROR; + } +} + +static void +complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) +{ + struct sbp2_command_orb *orb = (struct sbp2_command_orb *)base_orb; + struct fw_unit *unit = orb->unit; + struct 
fw_device *device = fw_device(unit->device.parent); + struct scatterlist *sg; + int result; + + if (status != NULL) { + if (status_get_dead(*status)) { + fw_notify("agent died, issuing agent reset\n"); + sbp2_agent_reset(unit); + } + + switch (status_get_response(*status)) { + case SBP2_STATUS_REQUEST_COMPLETE: + result = DID_OK; + break; + case SBP2_STATUS_TRANSPORT_FAILURE: + result = DID_BUS_BUSY; + break; + case SBP2_STATUS_ILLEGAL_REQUEST: + case SBP2_STATUS_VENDOR_DEPENDENT: + default: + result = DID_ERROR; + break; + } + + if (result == DID_OK && status_get_len(*status) > 1) + result = sbp2_status_to_sense_data(status_get_data(*status), + orb->cmd->sense_buffer); + } else { + /* If the orb completes with status == NULL, something + * went wrong, typically a bus reset happened mid-orb + * or when sending the write (less likely). */ + fw_notify("no command orb status, rcode=%d\n", + orb->base.rcode); + result = DID_ERROR; + } + + dma_unmap_single(device->card->device, orb->base.request_bus, + sizeof orb->request, DMA_TO_DEVICE); + + if (orb->cmd->use_sg > 0) { + sg = (struct scatterlist *)orb->cmd->request_buffer; + dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg, + orb->cmd->sc_data_direction); + } + + if (orb->page_table_bus != 0) + dma_unmap_single(device->card->device, orb->page_table_bus, + sizeof orb->page_table_bus, DMA_TO_DEVICE); + + if (orb->request_buffer_bus != 0) + dma_unmap_single(device->card->device, orb->request_buffer_bus, + sizeof orb->request_buffer_bus, + DMA_FROM_DEVICE); + + orb->cmd->result = result << 16; + orb->done(orb->cmd); + + kfree(orb); +} + +static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb) +{ + struct fw_unit *unit = + (struct fw_unit *)orb->cmd->device->host->hostdata[0]; + struct fw_device *device = fw_device(unit->device.parent); + struct sbp2_device *sd = unit->device.driver_data; + struct scatterlist *sg; + int sg_len, l, i, j, count; + size_t size; + dma_addr_t sg_addr; + + sg = (struct scatterlist *)orb->cmd->request_buffer; + count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg, + orb->cmd->sc_data_direction); + + /* Handle the special case where there is only one element in + * the scatter list by converting it to an immediate block + * request. This is also a workaround for broken devices such + * as the second generation iPod which doesn't support page + * tables. */ + if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) { + orb->request.data_descriptor.high = sd->address_high; + orb->request.data_descriptor.low = sg_dma_address(sg); + orb->request.misc |= + command_orb_data_size(sg_dma_len(sg)); + return; + } + + /* Convert the scatterlist to an sbp2 page table. If any + * scatterlist entries are too big for sbp2 we split the as we go. */ + for (i = 0, j = 0; i < count; i++) { + sg_len = sg_dma_len(sg + i); + sg_addr = sg_dma_address(sg + i); + while (sg_len) { + l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH); + orb->page_table[j].low = sg_addr; + orb->page_table[j].high = (l << 16); + sg_addr += l; + sg_len -= l; + j++; + } + } + + size = sizeof orb->page_table[0] * j; + + /* The data_descriptor pointer is the one case where we need + * to fill in the node ID part of the address. All other + * pointers assume that the data referenced reside on the + * initiator (i.e. us), but data_descriptor can refer to data + * on other nodes so we need to put our ID in descriptor.high. 
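+ * sd->address_high was cached as (LOCAL_BUS | local_node_id) << 16 at
+ * login/reconnect time, so it already carries our node ID.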
*/ + + orb->page_table_bus = + dma_map_single(device->card->device, orb->page_table, + size, DMA_TO_DEVICE); + orb->request.data_descriptor.high = sd->address_high; + orb->request.data_descriptor.low = orb->page_table_bus; + orb->request.misc |= + command_orb_page_table_present | + command_orb_data_size(j); + + fw_memcpy_to_be32(orb->page_table, orb->page_table, size); +} + +static void sbp2_command_orb_map_buffer(struct sbp2_command_orb *orb) +{ + struct fw_unit *unit = + (struct fw_unit *)orb->cmd->device->host->hostdata[0]; + struct fw_device *device = fw_device(unit->device.parent); + struct sbp2_device *sd = unit->device.driver_data; + + /* As for map_scatterlist, we need to fill in the high bits of + * the data_descriptor pointer. */ + + orb->request_buffer_bus = + dma_map_single(device->card->device, + orb->cmd->request_buffer, + orb->cmd->request_bufflen, + orb->cmd->sc_data_direction); + orb->request.data_descriptor.high = sd->address_high; + orb->request.data_descriptor.low = orb->request_buffer_bus; + orb->request.misc |= + command_orb_data_size(orb->cmd->request_bufflen); +} + +/* SCSI stack integration */ + +static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) +{ + struct fw_unit *unit = (struct fw_unit *)cmd->device->host->hostdata[0]; + struct fw_device *device = fw_device(unit->device.parent); + struct sbp2_device *sd = unit->device.driver_data; + struct sbp2_command_orb *orb; + + /* Bidirectional commands are not yet implemented, and unknown + * transfer direction not handled. */ + if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) { + fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command"); + cmd->result = DID_ERROR << 16; + done(cmd); + return 0; + } + + orb = kzalloc(sizeof *orb, GFP_ATOMIC); + if (orb == NULL) { + fw_notify("failed to alloc orb\n"); + cmd->result = DID_NO_CONNECT << 16; + done(cmd); + return 0; + } + + orb->base.request_bus = + dma_map_single(device->card->device, &orb->request, + sizeof orb->request, DMA_TO_DEVICE); + + orb->unit = unit; + orb->done = done; + orb->cmd = cmd; + + orb->request.next.high = SBP2_ORB_NULL; + orb->request.next.low = 0x0; + /* At speed 100 we can do 512 bytes per packet, at speed 200, + * 1024 bytes per packet etc. The SBP-2 max_payload field + * specifies the max payload size as 2 ^ (max_payload + 2), so + * if we set this to max_speed + 7, we get the right value. */ + orb->request.misc = + command_orb_max_payload(device->node->max_speed + 7) | + command_orb_speed(device->node->max_speed) | + command_orb_notify; + + if (cmd->sc_data_direction == DMA_FROM_DEVICE) + orb->request.misc |= + command_orb_direction(SBP2_DIRECTION_FROM_MEDIA); + else if (cmd->sc_data_direction == DMA_TO_DEVICE) + orb->request.misc |= + command_orb_direction(SBP2_DIRECTION_TO_MEDIA); + + if (cmd->use_sg) { + sbp2_command_orb_map_scatterlist(orb); + } else if (cmd->request_bufflen > SBP2_MAX_SG_ELEMENT_LENGTH) { + /* FIXME: Need to split this into a sg list... but + * could we get the scsi or blk layer to do that by + * reporting our max supported block size? 
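+ * One way would be to set .max_sectors in the scsi_host_template so the
+ * block layer never builds a request larger than we can map in one go.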
*/ + fw_error("command > 64k\n"); + cmd->result = DID_ERROR << 16; + done(cmd); + return 0; + } else if (cmd->request_bufflen > 0) { + sbp2_command_orb_map_buffer(orb); + } + + fw_memcpy_to_be32(&orb->request, &orb->request, sizeof orb->request); + + memset(orb->request.command_block, + 0, sizeof orb->request.command_block); + memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd)); + + orb->base.callback = complete_command_orb; + + sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation, + sd->command_block_agent_address + SBP2_ORB_POINTER); + + return 0; +} + +static int sbp2_scsi_slave_configure(struct scsi_device *sdev) +{ + struct fw_unit *unit = (struct fw_unit *)sdev->host->hostdata[0]; + struct sbp2_device *sd = unit->device.driver_data; + + if (sdev->type == TYPE_DISK && + sd->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) + sdev->skip_ms_page_8 = 1; + if (sd->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) { + fw_notify("setting fix_capacity for %s\n", unit->device.bus_id); + sdev->fix_capacity = 1; + } + + return 0; +} + +/* + * Called by scsi stack when something has really gone wrong. Usually + * called when a command has timed-out for some reason. + */ +static int sbp2_scsi_abort(struct scsi_cmnd *cmd) +{ + struct fw_unit *unit = (struct fw_unit *)cmd->device->host->hostdata[0]; + + fw_notify("sbp2_scsi_abort\n"); + + sbp2_cancel_orbs(unit); + + return SUCCESS; +} + +static struct scsi_host_template scsi_driver_template = { + .module = THIS_MODULE, + .name = "SBP-2 IEEE-1394", + .proc_name = (char *)sbp2_driver_name, + .queuecommand = sbp2_scsi_queuecommand, + .slave_configure = sbp2_scsi_slave_configure, + .eh_abort_handler = sbp2_scsi_abort, + .this_id = -1, + .sg_tablesize = SG_ALL, + .use_clustering = ENABLE_CLUSTERING, + .cmd_per_lun = 1, /* SBP2_MAX_CMDS, */ + .can_queue = 1, /* SBP2_MAX_CMDS, */ + .emulated = 1, +}; + +static int add_scsi_devices(struct fw_unit *unit) +{ + struct sbp2_device *sd = unit->device.driver_data; + int retval, lun; + + sd->scsi_host = scsi_host_alloc(&scsi_driver_template, + sizeof(unsigned long)); + if (sd->scsi_host == NULL) { + fw_error("failed to register scsi host\n"); + return -1; + } + + sd->scsi_host->hostdata[0] = (unsigned long)unit; + retval = scsi_add_host(sd->scsi_host, &unit->device); + if (retval < 0) { + fw_error("failed to add scsi host\n"); + scsi_host_put(sd->scsi_host); + return retval; + } + + /* FIXME: Loop over luns here. 
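+ * For now only LUN 0 is registered below; a multi-LUN unit would need
+ * one scsi_add_device() call per logical unit number.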
*/ + lun = 0; + retval = scsi_add_device(sd->scsi_host, 0, 0, lun); + if (retval < 0) { + fw_error("failed to add scsi device\n"); + scsi_remove_host(sd->scsi_host); + scsi_host_put(sd->scsi_host); + return retval; + } + + return 0; +} + +static void remove_scsi_devices(struct fw_unit *unit) +{ + struct sbp2_device *sd = unit->device.driver_data; + + scsi_remove_host(sd->scsi_host); + scsi_host_put(sd->scsi_host); +} + +MODULE_AUTHOR("Kristian Hoegsberg "); +MODULE_DESCRIPTION("SCSI over IEEE1394"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table); + +static int __init sbp2_init(void) +{ + return driver_register(&sbp2_driver.driver); +} + +static void __exit sbp2_cleanup(void) +{ + driver_unregister(&sbp2_driver.driver); +} + +module_init(sbp2_init); +module_exit(sbp2_cleanup); -- cgit v0.10.2 From fcf7770a0bf8e36318f5e73aa5b88bda97b7f920 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 27 Dec 2006 13:51:57 -0800 Subject: firewire: printk warning fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/firewire/fw-ohci.c: In function 'ar_context_init': drivers/firewire/fw-ohci.c:288: warning: format '%08x' expects type 'unsigned int', but argument 2 has type 'dma_addr_t' Signed-off-by: Andrew Morton Signed-off-by: Kristian Høgsberg Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index 5392a2b..b046b07 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c @@ -285,8 +285,8 @@ ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 control_set) return -ENOMEM; if (ctx->descriptor_bus & 0xf) - fw_notify("descriptor not 16-byte aligned: 0x%08x\n", - ctx->descriptor_bus); + fw_notify("descriptor not 16-byte aligned: 0x%08lx\n", + (unsigned long)ctx->descriptor_bus); ctx->buffer_bus = dma_map_single(ohci->card.device, ctx->buffer, -- cgit v0.10.2 From cf3e72fd85092bf7246c8266aff293c50e99b990 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 27 Dec 2006 14:36:37 -0800 Subject: firewire: build fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit alpha: drivers/firewire/fw-ohci.c: In function 'ar_context_tasklet': drivers/firewire/fw-ohci.c:266: warning: implicit declaration of function 'dma_sync_single_for_device' drivers/firewire/fw-ohci.c:267: error: 'DMA_TO_DEVICE' undeclared (first use in this function) drivers/firewire/fw-ohci.c:267: error: (Each undeclared identifier is reported only once drivers/firewire/fw-ohci.c:267: error: for each function it appears in.) drivers/firewire/fw-ohci.c: In function 'ar_context_init': drivers/firewire/fw-ohci.c:282: warning: implicit declaration of function 'dma_map_single' drivers/firewire/fw-ohci.c:283: error: 'DMA_TO_DEVICE' undeclared (first use in this function) drivers/firewire/fw-ohci.c:293: error: 'DMA_FROM_DEVICE' undeclared (first use in this function) etc. 
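The errors all point at the generic DMA mapping API (dma_map_single(), dma_sync_single_for_device(), DMA_TO_DEVICE and friends). The header name in the one-line fix below is not shown, but on Linux these declarations conventionally come from linux/dma-mapping.h, so the added include is presumably:

	#include <linux/dma-mapping.h>	/* assumption: provides dma_map_single(), DMA_TO_DEVICE, ... */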
Signed-off-by: Andrew Morton Signed-off-by: Kristian Høgsberg Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index b046b07..5d42d18 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c @@ -25,6 +25,8 @@ #include #include #include +#include + #include #include -- cgit v0.10.2 From 0b5b2903449a027d4fd04c58cee88e276f1608ce Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 27 Dec 2006 14:49:23 -0800 Subject: firewire: build fix 2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ia64: drivers/firewire/fw-sbp2.c: In function `sbp2_command_orb_map_scatterlist': drivers/firewire/fw-sbp2.c:826: warning: implicit declaration of function `sg_dma_len' drivers/firewire/fw-sbp2.c:828: warning: implicit declaration of function `sg_dma_address' drivers/firewire/fw-sbp2.c:837: error: invalid use of undefined type `struct scatterlist' drivers/firewire/fw-sbp2.c:838: error: invalid use of undefined type `struct scatterlist' Signed-off-by: Andrew Morton Signed-off-by: Kristian Høgsberg Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c index 2756e0c..1ac7253 100644 --- a/drivers/firewire/fw-sbp2.c +++ b/drivers/firewire/fw-sbp2.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include -- cgit v0.10.2 From fe69ca3ac226980dd2cc367a7613cd776cdb8c88 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 28 Dec 2006 12:46:54 +0100 Subject: firewire: build fix 3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/firewire/fw-sbp2.c: At top level: drivers/firewire/fw-sbp2.c:1060: error: storage size of '__mod_ieee1394_device_table' isn't known (error pointed out by akpm) Signed-off-by: Kristian Høgsberg Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c index 1ac7253..ce9c41f 100644 --- a/drivers/firewire/fw-sbp2.c +++ b/drivers/firewire/fw-sbp2.c @@ -20,6 +20,7 @@ #include #include +#include #include #include #include -- cgit v0.10.2 From 9c87da4eb37e316e89d4004766f3f4a375c15bdd Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 28 Dec 2006 16:20:00 +0100 Subject: firewire: mark fw-core as experimental The new stack is not yet stable WRT functionality and APIs. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index bfab4b3..72f8568 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig @@ -2,8 +2,12 @@ menu "IEEE 1394 (FireWire) support (JUJU alternative stack)" +comment "This is an EXPERIMENTAL set of alternative FireWire drivers." + depends on EXPERIMENTAL=n + config FW - tristate "IEEE 1394 (FireWire) support (JUJU alternative stack)" + tristate "IEEE 1394 (FireWire) support (JUJU alternative stack, experimental)" + depends on EXPERIMENTAL help IEEE 1394 describes a high performance serial bus, which is also known as FireWire(tm) or i.Link(tm) and is used for connecting all -- cgit v0.10.2 From 35b7541c8fb40e298017c2b1a90d82d740c3e2eb Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 4 Jan 2007 18:35:00 +0100 Subject: firewire: typo in Kconfig Correct a typo. Also spell FireWire consistently. 
Signed-off-by: Stefan Richter diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index 72f8568..8e14675 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig @@ -18,17 +18,17 @@ config FW is the core support only, you will also need to select a driver for your IEEE 1394 adapter. - This is the "JUJU" firewire stack, an alternative - implementation designed for roboustness and simplicity. + This is the "JUJU" FireWire stack, an alternative + implementation designed for robustness and simplicity. To compile this driver as a module, say M here: the module will be called fw-core. config FW_OHCI - tristate "Support for OHCI firewire host controllers" + tristate "Support for OHCI FireWire host controllers" depends on PCI && FW help - Enable this driver if you have an firewire controller based + Enable this driver if you have a FireWire controller based on the OHCI specification. For all practical purposes, this is the only chipset in use, so say Y here. @@ -39,8 +39,8 @@ config FW_SBP2 tristate "Support for storage devices (SBP-2 protocol driver)" depends on FW && SCSI help - This option enables you to use SBP-2 devices connected to an - firewire bus. SBP-2 devices include storage devices like + This option enables you to use SBP-2 devices connected to a + FireWire bus. SBP-2 devices include storage devices like harddisks and DVD drives, also some other FireWire devices like scanners. -- cgit v0.10.2 From 687198bbd2679cb72cf381da070082d3d9f57edf Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 28 Dec 2006 16:20:00 +0100 Subject: firewire: consistent ifndef blocks in header files Replace __fw_core_h by __fw_transaction_h to match the file name. Add comments to the final #endif in header files. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-device-cdev.h b/drivers/firewire/fw-device-cdev.h index 18b20c2..e2ae933 100644 --- a/drivers/firewire/fw-device-cdev.h +++ b/drivers/firewire/fw-device-cdev.h @@ -143,4 +143,4 @@ struct fw_cdev_send_iso { __s32 cycle; }; -#endif +#endif /* __fw_cdev_h */ diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 84cd5e7..731abbe 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h @@ -124,4 +124,4 @@ fw_driver(struct device_driver *drv) extern struct file_operations fw_device_ops; -#endif +#endif /* __fw_device_h */ diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h index 7582d6e..32ea7cd 100644 --- a/drivers/firewire/fw-topology.h +++ b/drivers/firewire/fw-topology.h @@ -81,4 +81,4 @@ fw_node_put(struct fw_node *node) void fw_destroy_nodes(struct fw_card *card); -#endif +#endif /* __fw_topology_h */ diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index 7f618f2..292add5 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -19,8 +19,8 @@ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ -#ifndef __fw_core_h -#define __fw_core_h +#ifndef __fw_transaction_h +#define __fw_transaction_h #include #include @@ -423,4 +423,4 @@ fw_core_handle_response(struct fw_card *card, u32 length, u32 *payload); -#endif /* __fw_core_h */ +#endif /* __fw_transaction_h */ -- cgit v0.10.2 From 22a38e72bc18b9e8b860182c512efaade5590c7d Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 31 Dec 2006 17:20:20 +0100 Subject: firewire: put old and new stack into same Kconfig submenu Screenshot from "make menuconfig": ... ?????????????????????? 
        IEEE 1394 (FireWire) support
   Arrow keys navigate the menu.  <Enter> selects submenus --->.
   ...
       IEEE 1394 (FireWire) support (JUJU alternative stack, experimental)
         Support for OHCI firewire host controllers
         Support for storage devices (SBP-2 protocol driver)
       IEEE 1394 (FireWire) support
       --- Subsystem Options
       [ ] Excessive debugging output
   ...
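The diff for this commit is not included above. As a rough sketch of the mechanism only (menu title and file paths assumed, not taken from the patch), grouping both stacks under one submenu amounts to sourcing the new drivers' Kconfig from inside the existing IEEE 1394 menu block instead of giving it a menu of its own, assuming the sourced files then contain plain config entries rather than menus of their own:

	# Hypothetical sketch -- not the actual patch.
	menu "IEEE 1394 (FireWire) support"

	# new JUJU stack: FW, FW_OHCI, FW_SBP2
	source "drivers/firewire/Kconfig"

	# old stack's options (assumed path)
	source "drivers/ieee1394/Kconfig"

	endmenu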