1 From 31d1ee0e615ad0fc84bd781d905f1cd56ae02909 Mon Sep 17 00:00:00 2001
2 From: Philip Balister <balister@moose.(none)>
3 Date: Wed, 13 Jan 2010 14:35:39 -0500
4 Subject: [PATCH 03/22] usrp-embedded: Add driver for USRP Embedded FPGA interface.
7 arch/arm/mach-omap2/board-overo.c | 176 +++++-
8 drivers/misc/Kconfig | 9 +
9 drivers/misc/Makefile | 1 +
10 drivers/misc/usrp_e.c | 1481 +++++++++++++++++++++++++++++++++++++
11 include/linux/usrp_e.h | 87 +++
12 5 files changed, 1750 insertions(+), 4 deletions(-)
13 create mode 100644 drivers/misc/usrp_e.c
14 create mode 100644 include/linux/usrp_e.h
16 diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
17 index 8a44c17..8686015 100644
18 --- a/arch/arm/mach-omap2/board-overo.c
19 +++ b/arch/arm/mach-omap2/board-overo.c
21 #include <linux/mtd/partitions.h>
22 #include <linux/mmc/host.h>
24 +#include <linux/spi/spi.h>
26 #include <asm/mach-types.h>
27 #include <asm/mach/arch.h>
28 #include <asm/mach/flash.h>
30 #define OVERO_GPIO_USBH_NRESET 183
32 #define NAND_BLOCK_SIZE SZ_128K
33 +#define GPMC_CS0_BASE 0x60
34 +#define GPMC_CS_SIZE 0x30
36 #define OVERO_SMSC911X_CS 5
37 #define OVERO_SMSC911X_GPIO 176
38 @@ -164,7 +168,9 @@ static struct platform_device overo_smsc911x2_device = {
40 static struct platform_device *smsc911x_devices[] = {
41 &overo_smsc911x_device,
43 &overo_smsc911x2_device,
47 static inline void __init overo_init_smsc911x(void)
48 @@ -194,6 +200,7 @@ static inline void __init overo_init_smsc911x(void)
50 /* set up second smsc911x chip */
53 if (gpmc_cs_request(OVERO_SMSC911X2_CS, SZ_16M, &cs_mem_base2) < 0) {
54 printk(KERN_ERR "Failed request for GPMC mem for smsc911x2\n");
56 @@ -213,6 +220,7 @@ static inline void __init overo_init_smsc911x(void)
57 overo_smsc911x2_resources[1].start = OMAP_GPIO_IRQ(OVERO_SMSC911X2_GPIO);
58 overo_smsc911x2_resources[1].end = 0;
61 platform_add_devices(smsc911x_devices, ARRAY_SIZE(smsc911x_devices));
64 @@ -353,6 +361,16 @@ static struct regulator_consumer_supply overo_vdda_dac_supply =
65 static struct regulator_consumer_supply overo_vdds_dsi_supply =
66 REGULATOR_SUPPLY("vdds_dsi", "omapdss");
68 +static struct spi_board_info overo_mcspi_board_info[] = {
70 + .modalias = "spidev",
71 + .max_speed_hz = 12000000, // 12 MHz
78 static struct mtd_partition overo_nand_partitions[] = {
81 @@ -432,8 +450,8 @@ static struct omap2_hsmmc_info mmc[] = {
83 .caps = MMC_CAP_4_BIT_DATA,
86 - .transceiver = true,
87 + .transceiver = true,
89 .ocr_mask = 0x00100000, /* 3.3V */
92 @@ -612,6 +630,8 @@ static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
93 static struct omap_board_mux board_mux[] __initdata = {
94 { .reg_offset = OMAP_MUX_TERMINATOR },
97 +#define board_mux NULL
100 static struct omap_musb_board_data musb_board_data = {
101 @@ -626,6 +646,148 @@ static struct omap_musb_board_data musb_board_data = {
105 +static void __init usrp1_e_init(void)
109 + printk("Setting up gpmc timing.\n");
111 +// Set up CS4, data read/write
114 + // Signal control parameters per chip select
115 + tmp = gpmc_cs_read_reg(4, GPMC_CS_CONFIG1);
116 +// tmp |= (GPMC_CONFIG1_MUXADDDATA);
117 +// tmp |= (GPMC_CONFIG1_WRITETYPE_SYNC);
118 +// tmp |= (GPMC_CONFIG1_READTYPE_SYNC);
119 + tmp |= (GPMC_CONFIG1_FCLK_DIV(0));
120 + gpmc_cs_write_reg(4, GPMC_CS_CONFIG1, tmp);
121 + printk("GPMC_CONFIG1 reg: %x\n", tmp);
125 + // CS signal timing parameter configuration
127 + tmp |= GPMC_CONFIG2_CSONTIME(1); /* 1 */
128 + tmp |= GPMC_CONFIG2_CSWROFFTIME(16); /* 16 */
129 + tmp |= GPMC_CONFIG2_CSRDOFFTIME(16); /* 16 */
130 + printk("GPMC_CONFIG2 reg: %x\n", tmp);
131 + gpmc_cs_write_reg(4, GPMC_CS_CONFIG2, tmp);
135 + // nADV signal timing parameter configuration
137 + tmp |= GPMC_CONFIG3_ADVONTIME(1);
138 + tmp |= GPMC_CONFIG3_ADVRDOFFTIME(2);
139 + tmp |= GPMC_CONFIG3_ADVWROFFTIME(2);
140 + printk("GPMC_CONFIG3 reg: %x\n", tmp);
141 + gpmc_cs_write_reg(4, GPMC_CS_CONFIG3, tmp);
145 + // nWE and nOE signals timing parameter configuration
147 + tmp |= GPMC_CONFIG4_WEONTIME(3); /* 3 */
148 + tmp |= GPMC_CONFIG4_WEOFFTIME(16); /* 16 */
149 + tmp |= GPMC_CONFIG4_OEONTIME(3); /* 3 */
150 + tmp |= GPMC_CONFIG4_OEOFFTIME(16); /* 16 */
151 + printk("GPMC_CONFIG4 reg: %x\n", tmp);
152 + gpmc_cs_write_reg(4, GPMC_CS_CONFIG4, tmp);
156 + // RdAccess time and Cycle time timing parameters configuration
158 + tmp |= GPMC_CONFIG5_PAGEBURSTACCESSTIME(1);
159 + tmp |= GPMC_CONFIG5_RDACCESSTIME(15); /* 15 */
160 + tmp |= GPMC_CONFIG5_WRCYCLETIME(17); /* 17 */
161 + tmp |= GPMC_CONFIG5_RDCYCLETIME(17); /* 17 */
162 + printk("GPMC_CONFIG5 reg: %x\n", tmp);
164 + gpmc_cs_write_reg(4, GPMC_CS_CONFIG5, tmp);
168 + // WrAccessTime WrDataOnADmuxBus, Cycle2Cycle, and BusTurnAround params
170 + tmp |= GPMC_CONFIG6_WRACCESSTIME(15); /* 15 */
171 + tmp |= GPMC_CONFIG6_WRDATAONADMUXBUS(3);
172 + tmp |= GPMC_CONFIG6_CYCLE2CYCLEDELAY(0);
173 + tmp |= GPMC_CONFIG6_BUSTURNAROUND(0);
174 + printk("GPMC_CONFIG6 reg: %x\n", tmp);
175 + gpmc_cs_write_reg(4, GPMC_CS_CONFIG6, tmp);
178 +// Configure timing for CS6, wishbone access
181 + // Signal control parameters per chip select
182 + tmp = gpmc_cs_read_reg(6, GPMC_CS_CONFIG1);
183 +// tmp |= (GPMC_CONFIG1_MUXADDDATA);
184 + tmp |= (GPMC_CONFIG1_WRITETYPE_SYNC);
185 + tmp |= (GPMC_CONFIG1_READTYPE_SYNC);
186 + tmp |= (GPMC_CONFIG1_FCLK_DIV(0));
187 + gpmc_cs_write_reg(6, GPMC_CS_CONFIG1, tmp);
188 + printk("GPMC_CONFIG1 reg: %x\n", tmp);
192 + // CS signal timing parameter configuration
194 + tmp |= GPMC_CONFIG2_CSONTIME(1);
195 + tmp |= GPMC_CONFIG2_CSWROFFTIME(17);
196 + tmp |= GPMC_CONFIG2_CSRDOFFTIME(17);
197 + printk("GPMC_CONFIG2 reg: %x\n", tmp);
198 + gpmc_cs_write_reg(6, GPMC_CS_CONFIG2, tmp);
202 + // nADV signal timing parameter configuration
204 + tmp |= GPMC_CONFIG3_ADVONTIME(1);
205 + tmp |= GPMC_CONFIG3_ADVRDOFFTIME(2);
206 + tmp |= GPMC_CONFIG3_ADVWROFFTIME(2);
207 + printk("GPMC_CONFIG3 reg: %x\n", tmp);
208 + gpmc_cs_write_reg(6, GPMC_CS_CONFIG3, tmp);
212 + // nWE and nOE signals timing parameter configuration
214 + tmp |= GPMC_CONFIG4_WEONTIME(3);
215 + tmp |= GPMC_CONFIG4_WEOFFTIME(4);
216 + tmp |= GPMC_CONFIG4_OEONTIME(3);
217 + tmp |= GPMC_CONFIG4_OEOFFTIME(4);
218 + printk("GPMC_CONFIG4 reg: %x\n", tmp);
219 + gpmc_cs_write_reg(6, GPMC_CS_CONFIG4, tmp);
223 + // RdAccess time and Cycle time timing parameters configuration
225 + tmp |= GPMC_CONFIG5_PAGEBURSTACCESSTIME(1);
226 + tmp |= GPMC_CONFIG5_RDACCESSTIME(4);
227 + tmp |= GPMC_CONFIG5_WRCYCLETIME(5);
228 + tmp |= GPMC_CONFIG5_RDCYCLETIME(5);
229 + printk("GPMC_CONFIG5 reg: %x\n", tmp);
230 + gpmc_cs_write_reg(6, GPMC_CS_CONFIG5, tmp);
234 + // WrAccessTime WrDataOnADmuxBus, Cycle2Cycle, and BusTurnAround params
236 + tmp |= GPMC_CONFIG6_WRACCESSTIME(15);
237 + tmp |= GPMC_CONFIG6_WRDATAONADMUXBUS(3);
238 + tmp |= GPMC_CONFIG6_CYCLE2CYCLEDELAY(3);
239 + tmp |= GPMC_CONFIG6_CYCLE2CYCLESAMECSEN;
240 + tmp |= GPMC_CONFIG6_BUSTURNAROUND(0);
241 + printk("GPMC_CONFIG6 reg: %x\n", tmp);
242 + gpmc_cs_write_reg(6, GPMC_CS_CONFIG6, tmp);
247 static void __init overo_init(void)
249 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
250 @@ -637,8 +799,14 @@ static void __init overo_init(void)
251 usb_ehci_init(&ehci_pdata);
253 overo_init_smsc911x();
255 overo_display_init();
260 + spi_register_board_info(overo_mcspi_board_info,
261 + ARRAY_SIZE(overo_mcspi_board_info));
263 /* Ensure SDRC pins are mux'd for self-refresh */
264 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
265 omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
266 @@ -680,7 +848,7 @@ static void __init overo_init(void)
267 "OVERO_GPIO_USBH_CPEN\n");
270 -MACHINE_START(OVERO, "Gumstsix Overo")
271 +MACHINE_START(OVERO, "Gumstix Overo")
272 .boot_params = 0x80000100,
273 .map_io = omap3_map_io,
274 .reserve = omap_reserve,
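
Since the hunk above registers the McSPI bus with the generic "spidev" modalias, the FPGA's SPI port can also be exercised directly from user space. A minimal sketch of such a client follows; the /dev/spidev1.0 node name is an assumption (the actual node depends on the bus number and chip select used in overo_mcspi_board_info), and the 12 MHz clock simply mirrors max_speed_hz above.

/* Hypothetical user-space client for the spidev node registered above. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

int main(void)
{
	uint8_t tx[2] = { 0xab, 0xcd }, rx[2] = { 0 };
	uint32_t speed = 12000000;			/* mirrors max_speed_hz above */
	struct spi_ioc_transfer xfer;
	int fd = open("/dev/spidev1.0", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed);

	memset(&xfer, 0, sizeof(xfer));
	xfer.tx_buf = (unsigned long)tx;	/* spidev takes buffer addresses as u64 */
	xfer.rx_buf = (unsigned long)rx;
	xfer.len = sizeof(tx);
	if (ioctl(fd, SPI_IOC_MESSAGE(1), &xfer) < 0)	/* one full-duplex transfer */
		perror("SPI_IOC_MESSAGE");

	printf("read back %02x %02x\n", rx[0], rx[1]);
	close(fd);
	return 0;
}
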
275 diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
276 index 4d073f1..8bd6dfb 100644
277 --- a/drivers/misc/Kconfig
278 +++ b/drivers/misc/Kconfig
279 @@ -452,6 +452,15 @@ config PCH_PHUB
280 To compile this driver as a module, choose M here: the module will
284 + tristate "USRP-E FPGA interface driver"
287 + This driver is for the Ettus Research USRP Embedded Software
288 + Defined Radio platform.
290 + If you do not plan to run this kernel on that hardware, choose N.
292 source "drivers/misc/c2port/Kconfig"
293 source "drivers/misc/eeprom/Kconfig"
294 source "drivers/misc/cb710/Kconfig"
295 diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
296 index 98009cc..f43483e 100644
297 --- a/drivers/misc/Makefile
298 +++ b/drivers/misc/Makefile
299 @@ -35,6 +35,7 @@ obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
300 obj-$(CONFIG_C2PORT) += c2port/
301 obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
302 obj-$(CONFIG_HMC6352) += hmc6352.o
303 +obj-$(CONFIG_USRP_E) += usrp_e.o
306 obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
307 diff --git a/drivers/misc/usrp_e.c b/drivers/misc/usrp_e.c
309 index 0000000..2923b14
311 +++ b/drivers/misc/usrp_e.c
315 + * Interface for USRP Embedded from Ettus Research, LLC.
316 + * This driver uses the GPMC interface on the OMAP3 to pass data
317 + * to/from a Spartan 3 FPGA.
319 + * Copyright (C) Ettus Research, LLC
321 + * Written by Philip Balister <philip@opensdr.com>
323 + * This program is free software; you can redistribute it and/or modify
324 + * it under the terms of the GNU General Public License version 2 as
325 + * published by the Free Software Foundation.
328 +#include <linux/fs.h>
329 +#include <linux/module.h>
330 +#include <linux/cdev.h>
331 +#include <linux/device.h>
332 +#include <linux/spinlock.h>
333 +#include <linux/errno.h>
334 +#include <linux/irq.h>
335 +#include <linux/interrupt.h>
336 +#include <linux/wait.h>
337 +#include <linux/sched.h>
338 +#include <linux/dma-mapping.h>
339 +#include <linux/semaphore.h>
340 +#include <linux/kthread.h>
341 +#include <linux/poll.h>
342 +#include <linux/slab.h>
344 +#include <plat/gpmc.h>
345 +#include <plat/gpio.h>
346 +#include <plat/dma.h>
348 +#include <asm/uaccess.h>
350 +#include <asm/atomic.h>
352 +#include <linux/usrp_e.h>
354 +#define TX_SPACE_AVAILABLE_GPIO 144
355 +#define RX_DATA_READY_GPIO 146
357 +static atomic_t use_count = ATOMIC_INIT(0);
358 +static atomic_t mapped = ATOMIC_INIT(0);
359 +static int shutting_down;
366 + unsigned long mem_base;
367 + unsigned long control_mem_base;
370 + struct spi_regs_wb *ctl_spi;
371 + struct i2c_regs_wb *ctl_i2c;
372 + spinlock_t fpga_lock;
374 + atomic_t n_overruns;
375 + atomic_t n_underruns;
381 + struct omap_dma_channel_params params;
383 + unsigned long virt_from;
384 + unsigned long virt_to;
385 + unsigned long phys_from;
386 + unsigned long phys_to;
389 +#define MISC_REGS_BASE 0x0
391 +#define UE_REG_MISC_LED (MISC_REGS_BASE + 0)
393 +#define UE_REG_MISC_RX_LEN (MISC_REGS_BASE + 10)
394 +#define UE_REG_MISC_TX_LEN (MISC_REGS_BASE + 12)
396 +#define UE_REG_SLAVE(n) ((n)<<7)
397 +#define UE_REG_SR_ADDR(n) ((UE_REG_SLAVE(5)) + (4*(n)))
399 +#define UE_REG_CTRL_TX_CLEAR_UNDERRUN UE_REG_SR_ADDR(25)
400 +#define UE_SR_CLEAR_FIFO UE_REG_SR_ADDR(6)
402 +#define CTL_SPI_BASE 0x100
404 +struct spi_regs_wb {
414 +/* Defines for spi ctrl register */
415 +#define UE_SPI_CTRL_ASS (BIT(13))
416 +#define UE_SPI_CTRL_IE (BIT(12))
417 +#define UE_SPI_CTRL_LSB (BIT(11))
418 +/* defines for TXNEG and RXNEG in usrp_e.h so user can pass them to driver. */
419 +#define UE_SPI_CTRL_GO_BSY (BIT(8))
420 +#define UE_SPI_CTRL_CHAR_LEN_MASK 0x7f
423 +#define CTL_I2C_BASE 0x180
425 +struct i2c_regs_wb {
458 +#define I2C_CTRL_EN (BIT(7)) /* core enable */
459 +#define I2C_CTRL_IE (BIT(6)) /* interrupt enable */
461 +/* STA, STO, RD, WR, and IACK bits are cleared automatically */
463 +#define I2C_CMD_START (BIT(7))
464 +#define I2C_CMD_STOP (BIT(6))
465 +#define I2C_CMD_RD (BIT(5))
466 +#define I2C_CMD_WR (BIT(4))
467 +#define I2C_CMD_NACK (BIT(3))
468 +#define I2C_CMD_RSVD_2 (BIT(2))
469 +#define I2C_CMD_RSVD_1 (BIT(1))
470 +#define I2C_CMD_IACK (BIT(0))
472 +#define I2C_ST_RXACK (BIT(7))
473 +#define I2C_ST_BUSY (BIT(6))
474 +#define I2C_ST_AL (BIT(5))
475 +#define I2C_RSVD_4 (BIT(4))
476 +#define I2C_RSVD_3 (BIT(3))
477 +#define I2C_RSVD_2 (BIT(2))
478 +#define I2C_ST_TIP (BIT(1))
479 +#define I2C_ST_IP (BIT(0))
481 +#define MAX_WB_DIV 4
482 +#define MASTER_CLK_RATE 64000000
483 +#define PRESCALAR(wb_div) (((MASTER_CLK_RATE/(wb_div)) / (5 * 100000)) - 1)
485 +static __u16 prescalar_values[MAX_WB_DIV+1] = {
493 +static struct dma_data *rx_dma;
494 +static struct dma_data *tx_dma;
496 +struct ring_buffer_entry {
497 + unsigned long dma_addr;
501 +struct ring_buffer {
502 + struct ring_buffer_info (*rbi)[];
503 + struct ring_buffer_entry (*rbe)[];
505 + unsigned long (*pages)[];
508 +static struct ring_buffer tx_rb;
509 +static struct ring_buffer rx_rb;
511 +static struct usrp_e_ring_buffer_size_t rb_size;
513 +#define NUM_PAGES_RX_FLAGS 1
514 +#define NUM_RX_FRAMES 100
515 +#define NUM_PAGES_TX_FLAGS 1
516 +#define NUM_TX_FRAMES 100
518 +static int tx_rb_write;
519 +static int tx_rb_read;
520 +static int rx_rb_write;
521 +static int rx_rb_read;
523 +static int alloc_ring_buffer(struct ring_buffer *rb,
524 + unsigned int num_bufs, enum dma_data_direction direction);
525 +static void delete_ring_buffer(struct ring_buffer *rb,
526 + unsigned int num_bufs, enum dma_data_direction direction);
527 +static int alloc_ring_buffers(void);
528 +static void init_ring_buffer(struct ring_buffer *rb, int num_bufs,
529 + int init_flags, enum dma_data_direction direction);
531 +static dev_t usrp_e_dev_number;
532 +static struct class *usrp_e_class;
534 +#define DEVICE_NAME "usrp_e"
536 +static const struct file_operations usrp_e_fops;
538 +static irqreturn_t space_available_irqhandler(int irq, void *dev_id);
539 +static irqreturn_t data_ready_irqhandler(int irq, void *dev_id);
540 +static void usrp_rx_dma_irq(int ch, u16 stat, void *data);
541 +static void usrp_tx_dma_irq(int ch, u16 stat, void *data);
543 +static DECLARE_WAIT_QUEUE_HEAD(data_received_queue);
544 +static DECLARE_WAIT_QUEUE_HEAD(space_available_queue);
545 +static DECLARE_WAIT_QUEUE_HEAD(received_data_from_user);
546 +static DECLARE_WAIT_QUEUE_HEAD(tx_rb_space_available);
548 +static int tx_dma_waiting_for_data;
549 +static int waiting_for_space_in_tx_rb;
553 +static DEFINE_SEMAPHORE(dma_lock);
555 +static void usrp_e_spi_init(void);
556 +static void usrp_e_i2c_init(void);
558 +static int init_dma_controller(void);
559 +static void release_dma_controller(void);
560 +static int get_frame_from_fpga_start(void);
561 +static int get_frame_from_fpga_finish(void);
562 +static int send_frame_to_fpga_start(void);
563 +static int send_frame_to_fpga_finish(void);
565 +static int rx_dma_active;
566 +static int tx_dma_active;
572 + struct usrp_e_dev *p;
574 + printk(KERN_DEBUG "usrp_e entering driver initialization\n");
576 + if (alloc_chrdev_region(&usrp_e_dev_number, 0, 1, DEVICE_NAME) < 0) {
577 + printk(KERN_DEBUG "Can't register device\n");
581 + usrp_e_class = class_create(THIS_MODULE, DEVICE_NAME);
583 + usrp_e_devp = kzalloc(sizeof(struct usrp_e_dev), GFP_KERNEL);
584 + if (!usrp_e_devp) {
585 + printk(KERN_ERR "Bad kmalloc\n");
589 + p = usrp_e_devp; /* Shorten var name so I stay sane. */
591 + printk(KERN_DEBUG "usrp_e data struct malloc'd.\n");
593 + atomic_set(&p->n_underruns, 0);
594 + atomic_set(&p->n_overruns, 0);
596 + printk(KERN_DEBUG "usrp_e data initialized.\n");
598 + cdev_init(&p->cdev, &usrp_e_fops);
599 + p->cdev.owner = THIS_MODULE;
601 + ret = cdev_add(&p->cdev, MKDEV(MAJOR(usrp_e_dev_number), 0), 1);
603 + printk(KERN_ERR "Bad cdev\n");
607 + printk(KERN_DEBUG "usrp_e major number : %d\n",
608 + MAJOR(usrp_e_dev_number));
609 + device_create(usrp_e_class, NULL, MKDEV(MAJOR(usrp_e_dev_number), 0),
610 + NULL, "usrp_e%d", 0);
612 + printk(KERN_DEBUG "Getting Chip Select\n");
614 + if (gpmc_cs_request(4, SZ_2K, &p->mem_base) < 0) {
615 + printk(KERN_ERR "Failed request for GPMC mem for usrp_e\n");
618 + printk(KERN_DEBUG "Got CS4, address = %lx\n", p->mem_base);
620 + if (!request_mem_region(p->mem_base, SZ_2K, "usrp_e")) {
621 + printk(KERN_ERR "Request_mem_region failed.\n");
626 + p->ioaddr = ioremap(p->mem_base, SZ_2K);
627 + spin_lock_init(&p->fpga_lock);
629 + if (gpmc_cs_request(6, SZ_2K, &p->control_mem_base) < 0) {
630 + printk(KERN_ERR "Failed request for GPMC control mem for usrp_e\n");
633 + printk(KERN_DEBUG "Got CS6, address = %lx\n", p->control_mem_base);
635 + if (!request_mem_region(p->control_mem_base, SZ_2K, "usrp_e_c")) {
636 + printk(KERN_ERR "Request_mem_region failed.\n");
641 + p->ctl_addr = ioremap_nocache(p->control_mem_base, SZ_2K);
643 + /* Initialize wishbone SPI and I2C interfaces */
648 + /* Configure GPIOs */
650 + if (!(((gpio_request(TX_SPACE_AVAILABLE_GPIO,
651 + "TX_SPACE_AVAILABLE_GPIO") == 0) &&
652 + (gpio_direction_input(TX_SPACE_AVAILABLE_GPIO) == 0)))) {
653 + printk(KERN_ERR "Could not claim GPIO for TX_SPACE_AVAILABLE_GPIO\n");
657 + if (!(((gpio_request(RX_DATA_READY_GPIO, "RX_DATA_READY_GPIO") == 0) &&
658 + (gpio_direction_input(RX_DATA_READY_GPIO) == 0)))) {
659 + printk(KERN_ERR "Could not claim GPIO for RX_DATA_READY_GPIO\n");
664 + if (!(((gpio_request(14, "Debug0") == 0) &&
665 + (gpio_direction_output(14, 0) == 0)))) {
666 + printk(KERN_ERR "Could not claim GPIO for Debug0\n");
670 + if (!(((gpio_request(21, "Debug1") == 0) &&
671 + (gpio_direction_output(21, 0) == 0)))) {
672 + printk(KERN_ERR "Could not claim GPIO for Debug1\n");
676 + if (!(((gpio_request(22, "Debug2") == 0) &&
677 + (gpio_direction_output(22, 0) == 0)))) {
678 + printk(KERN_ERR "Could not claim GPIO for Debug2\n");
682 + if (!(((gpio_request(23, "Debug3") == 0) &&
683 + (gpio_direction_output(23, 0) == 0)))) {
684 + printk(KERN_ERR "Could not claim GPIO for Debug3\n");
689 + rb_size.num_pages_rx_flags = NUM_PAGES_RX_FLAGS;
690 + rb_size.num_rx_frames = NUM_RX_FRAMES;
691 + rb_size.num_pages_tx_flags = NUM_PAGES_TX_FLAGS;
692 + rb_size.num_tx_frames = NUM_TX_FRAMES;
694 + ret = alloc_ring_buffers();
698 + /* Initialize various DMA related flags */
703 + printk(KERN_DEBUG "usrp_e Driver Initialized.\n");
709 +usrp_e_cleanup(void)
711 + struct usrp_e_dev *p = usrp_e_devp;
713 + unregister_chrdev_region(usrp_e_dev_number, 1);
715 + release_mem_region(p->mem_base, SZ_2K);
716 + release_mem_region(p->control_mem_base, SZ_2K);
718 + device_destroy(usrp_e_class, MKDEV(MAJOR(usrp_e_dev_number), 0));
719 + cdev_del(&p->cdev);
721 + class_destroy(usrp_e_class);
723 + iounmap(p->ioaddr);
724 + iounmap(p->ctl_addr);
729 + printk(KERN_DEBUG "Freeing gpios\n");
731 + gpio_free(TX_SPACE_AVAILABLE_GPIO);
732 + gpio_free(RX_DATA_READY_GPIO);
740 + delete_ring_buffer(&tx_rb, rb_size.num_tx_frames, DMA_TO_DEVICE);
741 + delete_ring_buffer(&rx_rb, rb_size.num_rx_frames, DMA_FROM_DEVICE);
745 + printk(KERN_DEBUG "Leaving cleanup\n");
749 +usrp_e_open(struct inode *inode, struct file *file)
751 + struct usrp_e_dev *p = usrp_e_devp;
754 + printk(KERN_DEBUG "usrp_e open called, use_count = %d\n",
755 + atomic_read(&use_count));
756 + if (atomic_add_return(1, &use_count) != 1) {
757 + printk(KERN_ERR "use_count = %d\n", atomic_read(&use_count));
758 + atomic_dec(&use_count);
762 + /* Clear rx and tx fifos in the fpga */
763 + writel(1, p->ctl_addr + UE_SR_CLEAR_FIFO);
764 + writel(2, p->ctl_addr + UE_SR_CLEAR_FIFO);
767 + usrp_e_devp = container_of(inode->i_cdev, struct usrp_e_dev, cdev);
769 + file->private_data = usrp_e_devp;
772 + ret = init_dma_controller();
786 + init_ring_buffer(&rx_rb, rb_size.num_rx_frames, RB_KERNEL, DMA_FROM_DEVICE);
787 + init_ring_buffer(&tx_rb, rb_size.num_tx_frames, RB_KERNEL, DMA_TO_DEVICE);
789 + /* Configure interrupts for GPIO pins */
791 + ret = request_irq(gpio_to_irq(TX_SPACE_AVAILABLE_GPIO),
792 + space_available_irqhandler,
793 + IRQF_TRIGGER_RISING, "usrp_e_space_available", NULL);
795 + ret = request_irq(gpio_to_irq(RX_DATA_READY_GPIO),
796 + data_ready_irqhandler,
797 + IRQF_TRIGGER_RISING, "usrp_e_data_ready", NULL);
799 + printk(KERN_DEBUG "usrp: leaving open\n");
804 +usrp_e_release(struct inode *inode, struct file *file)
806 + struct usrp_e_dev *usrp_e_devp = file->private_data;
808 + printk(KERN_DEBUG "usrp_e release called\n");
810 + if (atomic_read(&use_count) != 1) {
811 + printk(KERN_ERR "Attempt to close usrp_e driver that is not open\n");
815 + printk(KERN_DEBUG "Waiting for DMA to become inactive\n");
817 + while (tx_dma_active || rx_dma_active)
820 + /* Freeing gpio irqs */
821 + printk(KERN_DEBUG "Freeing gpio irqs\n");
823 + free_irq(gpio_to_irq(TX_SPACE_AVAILABLE_GPIO), NULL);
824 + free_irq(gpio_to_irq(RX_DATA_READY_GPIO), NULL);
826 + printk(KERN_DEBUG "Freeing DMA channels\n");
828 + release_dma_controller();
832 + atomic_dec(&use_count);
838 +usrp_e_read(struct file *file, char *buf, size_t count, loff_t *ppos)
846 + if (!((*rx_rb.rbi)[rx_rb_read].flags & RB_USER)) {
847 + if (wait_event_interruptible(data_received_queue,
848 + (((*rx_rb.rbi)[rx_rb_read].flags & RB_USER))))
852 + rx_pkt_len = (*rx_rb.rbi)[rx_rb_read].len;
853 + ret = copy_to_user(buf, (*rx_rb.rbe)[rx_rb_read].frame_addr, rx_pkt_len);
855 + (*rx_rb.rbi)[rx_rb_read].flags = RB_KERNEL;
858 + if (rx_rb_read == rb_size.num_rx_frames)
861 + get_frame_from_fpga_start();
863 + return ((ret == 0) ? rx_pkt_len : -EFAULT);
867 +usrp_e_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
872 +// printk("In write.\n");
874 + /* Trigger a DMA transfer. Used to start transmit after writing */
875 + /* data into the transmit ring buffer from user space */
877 + send_frame_to_fpga_start();
884 +// printk("Write flags: %d\n", (*tx_rb.rbe)[tx_rb_write].flags);
885 + if (!((*tx_rb.rbi)[tx_rb_write].flags & RB_KERNEL)) {
886 + waiting_for_space_in_tx_rb = 1;
887 +// printk("Sleeping\n");
888 + if (wait_event_interruptible(tx_rb_space_available,
889 + ((*tx_rb.rbi)[tx_rb_write].flags & RB_KERNEL)))
891 +// printk("Waking\n");
894 + ret = copy_from_user((*tx_rb.rbe)[tx_rb_write].frame_addr, buf, count);
898 + (*tx_rb.rbi)[tx_rb_write].len = count;
899 + (*tx_rb.rbi)[tx_rb_write].flags = RB_USER;
902 + if (tx_rb_write == rb_size.num_tx_frames)
905 +// printk("Calling send_to_fpga_start from write\n");
907 + send_frame_to_fpga_start();
913 +usrp_e_llseek(struct file *file, loff_t offset, int orig)
915 + printk(KERN_DEBUG "usrp_e llseek called\n");
920 +static int usrp_e_ctl16(unsigned long arg, int direction)
922 + struct usrp_e_ctl16 __user *argp = (struct usrp_e_ctl16 __user *) arg;
924 + struct usrp_e_ctl16 ctl;
926 + if (copy_from_user(&ctl, argp, sizeof(struct usrp_e_ctl16)))
929 + if (ctl.count > 10)
932 + if (direction == 0) {
933 + for (i = 0; i < ctl.count; i++)
934 + writew(ctl.buf[i], &usrp_e_devp->ctl_addr \
936 + } else if (direction == 1) {
937 + for (i = 0; i < ctl.count; i++)
938 + ctl.buf[i] = readw(&usrp_e_devp->ctl_addr \
941 + if (copy_to_user(argp, &ctl, sizeof(struct usrp_e_ctl16)))
949 +static int usrp_e_ctl32(unsigned long arg, int direction)
951 + struct usrp_e_ctl32 __user *argp = (struct usrp_e_ctl32 __user *) arg;
953 + struct usrp_e_ctl32 ctl;
955 + if (copy_from_user(&ctl, argp, sizeof(struct usrp_e_ctl32)))
958 + if (ctl.count > 20)
961 + if (direction == 0) {
962 + for (i = 0; i < ctl.count; i++)
963 + writel(ctl.buf[i], &usrp_e_devp->ctl_addr \
965 + } else if (direction == 1) {
966 + for (i = 0; i < ctl.count; i++)
967 + ctl.buf[i] = readl(&usrp_e_devp->ctl_addr \
970 + if (copy_to_user(argp, &ctl, sizeof(struct usrp_e_ctl32)))
979 +static int usrp_e_get_rb_info(unsigned long arg)
981 + struct usrp_e_ring_buffer_size_t __user *argp = (struct usrp_e_ring_buffer_size_t __user *) arg;
984 + if (copy_to_user(argp, &rb_size, sizeof(rb_size)))
990 +static void usrp_e_spi_init(void)
992 + struct usrp_e_dev *p = usrp_e_devp;
994 + p->ctl_spi = (struct spi_regs_wb *)(p->ctl_addr + CTL_SPI_BASE);
995 + p->ctl_spi->div = 64; /* 1 = Div by 4 (12.5 MHz) */
998 +static int usrp_e_spi_wait(void)
1000 + struct usrp_e_dev *p = usrp_e_devp;
1002 + while (p->ctl_spi->ctrl & UE_SPI_CTRL_GO_BSY) {
1003 + if (signal_pending(current)) {
1004 + printk(KERN_DEBUG "Signal received.\n");
1005 + set_current_state(TASK_RUNNING);
1014 +static int usrp_e_spi(unsigned long arg)
1016 + struct usrp_e_dev *p = usrp_e_devp;
1017 + struct usrp_e_spi __user *argp = (struct usrp_e_spi __user *) arg;
1018 + struct usrp_e_spi spi_cmd;
1021 + if (copy_from_user(&spi_cmd, argp, sizeof(struct usrp_e_spi)))
1024 + spi_cmd.flags &= (UE_SPI_CTRL_TXNEG | UE_SPI_CTRL_RXNEG);
1025 + ctrl = UE_SPI_CTRL_ASS | (UE_SPI_CTRL_CHAR_LEN_MASK & spi_cmd.length) \
1028 + ret = usrp_e_spi_wait();
1032 + p->ctl_spi->ss = spi_cmd.slave & 0xff;
1034 + p->ctl_spi->txrx0 = spi_cmd.data;
1036 + p->ctl_spi->ctrl = ctrl;
1037 + p->ctl_spi->ctrl = ctrl | UE_SPI_CTRL_GO_BSY;
1039 + if (spi_cmd.readback) {
1040 + usrp_e_spi_wait();
1041 + if (copy_to_user(&argp->data, &p->ctl_spi->txrx0,
1051 +static void usrp_e_i2c_init(void)
1053 + struct usrp_e_dev *p = usrp_e_devp;
1056 + p->ctl_i2c = (struct i2c_regs_wb *)(p->ctl_addr + CTL_I2C_BASE);
1058 + writeb(0, &p->ctl_i2c->ctrl); /* disable core */
1060 + /* Assume wb_div is 4, deal with this later */
1062 + if (wb_div > MAX_WB_DIV)
1063 + wb_div = MAX_WB_DIV;
1065 + writeb((prescalar_values[wb_div] & 0xff), &p->ctl_i2c->prescalar_lo);
1066 + writeb(((prescalar_values[wb_div] >> 8) & 0xff),
1067 + &p->ctl_i2c->prescalar_hi);
1068 + writeb(I2C_CTRL_EN, &p->ctl_i2c->ctrl); /* enable core */
1071 +static int usrp_e_i2c_wait(__u32 mask, int chk_ack)
1073 + struct usrp_e_dev *p = usrp_e_devp;
1075 + while (readb(&p->ctl_i2c->cmd_status) & mask) {
1076 + if (signal_pending(current)) {
1077 + printk(KERN_DEBUG "Signal received.\n");
1078 + set_current_state(TASK_RUNNING);
1085 + if ((readb(&p->ctl_i2c->cmd_status) & I2C_ST_RXACK) == 0)
1094 +static int usrp_e_i2c(unsigned long arg, int direction)
1096 + struct usrp_e_dev *p = usrp_e_devp;
1097 + struct usrp_e_i2c __user *argp = (struct usrp_e_i2c __user *) arg;
1098 + struct usrp_e_i2c tmp;
1099 + struct usrp_e_i2c *i2c_msg;
1102 + if (copy_from_user(&tmp, argp, sizeof(struct usrp_e_i2c)))
1105 + i2c_msg = kmalloc(sizeof(struct usrp_e_i2c) + tmp.len, GFP_KERNEL);
1109 + if (copy_from_user(i2c_msg, argp,
1110 + (sizeof(struct usrp_e_i2c) + tmp.len)))
1115 + if (i2c_msg->len == 0)
1118 + usrp_e_i2c_wait(I2C_ST_BUSY, 0);
1120 + writeb(((i2c_msg->addr << 1) | 1), &p->ctl_i2c->data);
1121 + writeb((I2C_CMD_WR | I2C_CMD_START), &p->ctl_i2c->cmd_status);
1122 + ret = usrp_e_i2c_wait(I2C_ST_TIP, 1);
1125 + } else if (ret == 0) {
1126 + writeb(I2C_CMD_STOP, &p->ctl_i2c->cmd_status);
1130 + for (len = i2c_msg->len, i = 0; len > 0; i++, len--) {
1131 + writeb((I2C_CMD_RD | ((len == 1) ?
1132 + (I2C_CMD_NACK | I2C_CMD_STOP) : 0)),
1133 + &p->ctl_i2c->cmd_status);
1134 + usrp_e_i2c_wait(I2C_ST_TIP, 0);
1135 + i2c_msg->data[i] = readb(&p->ctl_i2c->data);
1137 + if (copy_to_user(argp, i2c_msg, (sizeof(struct usrp_e_i2c) +
1142 + usrp_e_i2c_wait(I2C_ST_BUSY, 0);
1143 + writeb(((i2c_msg->addr << 1) | 0), &p->ctl_i2c->data);
1144 + writeb((I2C_CMD_WR | I2C_CMD_START |
1145 + (i2c_msg->len == 0 ? I2C_CMD_STOP : 0)),
1146 + &p->ctl_i2c->cmd_status);
1147 + ret = usrp_e_i2c_wait(I2C_ST_TIP, 1);
1150 + } else if (ret == 0) {
1151 + writeb(I2C_CMD_STOP, &p->ctl_i2c->cmd_status);
1154 + for (len = i2c_msg->len, i = 0; len > 0; i++, len--) {
1155 + writeb(i2c_msg->data[i], &p->ctl_i2c->data);
1156 + writeb((I2C_CMD_WR | (len == 1 ? I2C_CMD_STOP : 0)),
1157 + &p->ctl_i2c->cmd_status);
1158 + ret = usrp_e_i2c_wait(I2C_ST_TIP, 1);
1161 + } else if (ret == 0) {
1162 + writeb(I2C_CMD_STOP, &p->ctl_i2c->cmd_status);
1173 +static long usrp_e_ioctl(struct file *file,
1174 + unsigned int cmd, unsigned long arg)
1178 + case USRP_E_WRITE_CTL16:
1179 + return usrp_e_ctl16(arg, 0);
1181 + case USRP_E_READ_CTL16:
1182 + return usrp_e_ctl16(arg, 1);
1184 + case USRP_E_WRITE_CTL32:
1185 + return usrp_e_ctl32(arg, 0);
1187 + case USRP_E_READ_CTL32:
1188 + return usrp_e_ctl32(arg, 1);
1191 + return usrp_e_spi(arg);
1193 + case USRP_E_I2C_WRITE:
1194 + return usrp_e_i2c(arg, 0);
1196 + case USRP_E_I2C_READ:
1197 + return usrp_e_i2c(arg, 1);
1199 + case USRP_E_GET_RB_INFO:
1200 + return usrp_e_get_rb_info(arg);
1209 +static unsigned int usrp_e_poll(struct file *filp, poll_table *wait)
1211 + unsigned int mask = 0;
1213 + poll_wait(filp, &data_received_queue, wait);
1214 + poll_wait(filp, &tx_rb_space_available, wait);
1216 + // Make sure write is active before sleeping
1217 + send_frame_to_fpga_start();
1219 + /* Make sure to read in case the rx ring buffer is full */
1220 + get_frame_from_fpga_start();
1222 + // This likely needs some locking. The pointer is incremented
1223 + // before the flag state is updated.
1225 + if (rx_rb_write == 0) {
1226 + if ((*rx_rb.rbi)[rb_size.num_rx_frames - 1].flags & RB_USER)
1227 + mask |= POLLIN | POLLRDNORM;
1229 + if ((*rx_rb.rbi)[rx_rb_write - 1].flags & RB_USER)
1230 + mask |= POLLIN | POLLRDNORM;
1233 + if (tx_rb_read == 0) {
1234 + if ((*tx_rb.rbi)[rb_size.num_tx_frames - 1].flags & RB_KERNEL)
1235 + mask |= POLLOUT | POLLWRNORM;
1237 + if ((*tx_rb.rbi)[tx_rb_read - 1].flags & RB_KERNEL)
1238 + mask |= POLLOUT | POLLWRNORM;
1245 +/* The mmap code is based on code in af_packet.c */
1247 +static void usrp_e_mm_open(struct vm_area_struct *vma)
1250 + atomic_inc(&mapped);
1253 +static void usrp_e_mm_close(struct vm_area_struct *vma)
1256 + atomic_dec(&mapped);
1259 +static const struct vm_operations_struct usrp_e_mmap_ops = {
1260 + .open = usrp_e_mm_open,
1261 + .close = usrp_e_mm_close,
1264 +static int usrp_e_mmap(struct file *filp, struct vm_area_struct *vma)
1266 + unsigned long size, expected_size;
1268 + unsigned long start;
1270 + struct page *page;
1272 + printk("In mmap\n");
1274 + if (vma->vm_pgoff)
1277 + /* Verify the user will map the entire tx and rx ring buffer space */
1278 + expected_size = (rb_size.num_rx_frames + rb_size.num_tx_frames) * (PAGE_SIZE >> 1)
1279 + + (rb_size.num_pages_rx_flags + rb_size.num_pages_tx_flags) * PAGE_SIZE;
1281 + size = vma->vm_end - vma->vm_start;
1282 + printk(KERN_DEBUG "Size = %lu, expected size = %lu\n", size, expected_size);
1284 + if (size != expected_size)
1287 + start = vma->vm_start;
1289 + page = virt_to_page(rx_rb.rbi);
1290 + err = vm_insert_page(vma, start, page);
1294 + start += PAGE_SIZE;
1296 + for (i = 0; i < rx_rb.num_pages; ++i) {
1297 + struct page *page = virt_to_page((*rx_rb.pages)[i]);
1298 + err = vm_insert_page(vma, start, page);
1302 + start += PAGE_SIZE;
1305 + page = virt_to_page(tx_rb.rbi);
1306 + err = vm_insert_page(vma, start, page);
1310 + start += PAGE_SIZE;
1312 + for (i = 0; i < tx_rb.num_pages; ++i) {
1313 + struct page *page = virt_to_page((*tx_rb.pages)[i]);
1315 + err = vm_insert_page(vma, start, page);
1319 + start += PAGE_SIZE;
1322 + vma->vm_ops = &usrp_e_mmap_ops;
1327 +static const struct file_operations usrp_e_fops = {
1328 + .owner = THIS_MODULE,
1329 + .open = usrp_e_open,
1330 + .release = usrp_e_release,
1331 + .read = usrp_e_read,
1332 + .write = usrp_e_write,
1333 + .llseek = usrp_e_llseek,
1334 + .unlocked_ioctl = usrp_e_ioctl,
1335 + .poll = usrp_e_poll,
1336 + .mmap = usrp_e_mmap,
1339 +MODULE_VERSION("0.1");
1340 +MODULE_ALIAS(DEVICE_NAME);
1341 +MODULE_DESCRIPTION(DEVICE_NAME);
1342 +MODULE_AUTHOR("Philip Balister <philip@opensdr.com>");
1343 +MODULE_LICENSE("GPL v2");
1345 +module_init(usrp_e_init);
1346 +module_exit(usrp_e_cleanup);
1348 +static irqreturn_t space_available_irqhandler(int irq, void *dev_id)
1350 + int serviced = IRQ_NONE;
1353 + gpio_set_value(22, 1);
1356 +// printk("Calling send_to_fpga_start from space_available irq\n");
1357 + send_frame_to_fpga_start();
1359 + serviced = IRQ_HANDLED;
1362 + gpio_set_value(22, 0);
1368 +static void usrp_rx_dma_irq(int ch, u16 stat, void *data)
1372 + gpio_set_value(23, 1);
1375 + rx_dma_active = 0;
1377 + get_frame_from_fpga_finish();
1380 + gpio_set_value(23, 0);
1384 +static void usrp_tx_dma_irq(int ch, u16 stat, void *data)
1388 + gpio_set_value(23, 1);
1390 + tx_dma_active = 0;
1392 + send_frame_to_fpga_finish();
1395 + gpio_set_value(23, 0);
1399 + gpio_set_value(21, 1);
1400 + gpio_set_value(21, 0);
1405 +static irqreturn_t data_ready_irqhandler(int irq, void *dev_id)
1407 + int serviced = IRQ_NONE;
1410 + gpio_set_value(22, 1);
1413 + get_frame_from_fpga_start();
1415 + serviced = IRQ_HANDLED;
1418 + gpio_set_value(22, 0);
1423 +static int init_dma_controller(void)
1425 + struct usrp_e_dev *p = usrp_e_devp;
1427 + rx_dma = kzalloc(sizeof(struct dma_data), GFP_KERNEL);
1429 + printk(KERN_ERR "Failed to allocate memory for rx_dma struct.");
1433 + if (omap_request_dma(OMAP_DMA_NO_DEVICE, "usrp-e-rx",
1434 + usrp_rx_dma_irq, (void *) rx_dma, &rx_dma->ch)) {
1435 + printk(KERN_ERR "Could not get rx DMA channel for usrp_e\n");
1438 + printk(KERN_DEBUG "rx_dma->ch %d\n", rx_dma->ch);
1440 + rx_dma->phys_from = p->mem_base;
1442 + memset(&rx_dma->params, 0, sizeof(rx_dma->params));
1443 + rx_dma->params.data_type = OMAP_DMA_DATA_TYPE_S16;
1445 + rx_dma->params.src_amode = OMAP_DMA_AMODE_CONSTANT;
1446 + rx_dma->params.dst_amode = OMAP_DMA_AMODE_POST_INC;
1448 + rx_dma->params.src_start = p->mem_base;
1449 + rx_dma->params.dst_start = rx_dma->phys_to;
1451 + rx_dma->params.src_ei = 1;
1452 + rx_dma->params.src_fi = 1;
1453 + rx_dma->params.dst_ei = 1;
1454 + rx_dma->params.dst_fi = 1;
1456 + rx_dma->params.elem_count = 1024;
1457 + rx_dma->params.frame_count = 1;
1459 + rx_dma->params.read_prio = DMA_CH_PRIO_HIGH;
1460 + rx_dma->params.write_prio = DMA_CH_PRIO_LOW;
1462 + omap_set_dma_params(rx_dma->ch, &rx_dma->params);
1464 +// Play with these with a real application
1465 +//G omap_set_dma_src_burst_mode(rx_dma->ch, OMAP_DMA_DATA_BURST_16);
1466 +// omap_set_dma_dest_burst_mode(rx_dma->ch, OMAP_DMA_DATA_BURST_16);
1468 +#if 0 // Need to find implementations of the endian calls
1469 + omap_set_dma_src_endian_type(rx_dma->ch, OMAP_DMA_BIG_ENDIAN);
1470 + omap_set_dma_dst_endian_type(rx_dma->ch, OMAP_DMA_LITTLE_ENDIAN);
1473 + tx_dma = kzalloc(sizeof(struct dma_data), GFP_KERNEL);
1475 + printk(KERN_ERR "Failed to allocate memory for tx_dma struct.");
1479 + if (omap_request_dma(OMAP_DMA_NO_DEVICE, "usrp-e-tx",
1480 + usrp_tx_dma_irq, (void *) tx_dma, &tx_dma->ch)) {
1481 + printk(KERN_ERR "Could not get tx DMA channel for usrp_e\n");
1485 + printk(KERN_DEBUG "tx_dma->ch %d\n", tx_dma->ch);
1487 + tx_dma->phys_from = p->mem_base;
1489 + memset(&tx_dma->params, 0, sizeof(tx_dma->params));
1490 + tx_dma->params.data_type = OMAP_DMA_DATA_TYPE_S16;
1492 + tx_dma->params.src_amode = OMAP_DMA_AMODE_POST_INC;
1493 + tx_dma->params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
1495 + tx_dma->params.src_start = tx_dma->phys_from;
1496 + tx_dma->params.dst_start = p->mem_base;
1498 + tx_dma->params.src_ei = 1;
1499 + tx_dma->params.src_fi = 1;
1500 + tx_dma->params.dst_ei = 1;
1501 + tx_dma->params.dst_fi = 1;
1503 + tx_dma->params.elem_count = 1024;
1504 + tx_dma->params.frame_count = 1;
1506 + tx_dma->params.read_prio = DMA_CH_PRIO_LOW;
1507 + tx_dma->params.write_prio = DMA_CH_PRIO_HIGH;
1509 + omap_set_dma_params(tx_dma->ch, &tx_dma->params);
1511 +// Play with these with a real application
1512 +//G omap_set_dma_src_burst_mode(tx_dma->ch, OMAP_DMA_DATA_BURST_16);
1513 +// omap_set_dma_dest_burst_mode(tx_dma->ch, OMAP_DMA_DATA_BURST_16);
1518 +static void release_dma_controller(void)
1521 + omap_free_dma(rx_dma->ch);
1522 + omap_free_dma(tx_dma->ch);
1528 +static int get_frame_from_fpga_start(void)
1530 + struct usrp_e_dev *p = usrp_e_devp;
1531 + struct ring_buffer_info *rbi = &(*rx_rb.rbi)[rx_rb_write];
1532 + struct ring_buffer_entry *rbe = &(*rx_rb.rbe)[rx_rb_write];
1533 + u16 elements_to_read;
1535 + /* Check for space available in the ring buffer */
1536 + /* If no space, drop data. A read call will restart dma transfers. */
1537 + if (((*rx_rb.rbi)[rx_rb_write].flags & RB_KERNEL) && (gpio_get_value(RX_DATA_READY_GPIO)) && !rx_dma_active && !shutting_down) {
1539 + rx_dma_active = 1;
1541 + gpio_set_value(14, 1);
1544 + elements_to_read = readw(p->ctl_addr + UE_REG_MISC_RX_LEN);
1546 + rbi->flags = RB_DMA_ACTIVE;
1548 + rbi->len = elements_to_read << 1;
1550 + omap_set_dma_dest_addr_size(rx_dma->ch, rbe->dma_addr,
1551 + elements_to_read);
1553 + dma_sync_single_for_device(NULL, rbe->dma_addr, SZ_2K, DMA_FROM_DEVICE);
1555 + omap_start_dma(rx_dma->ch);
1562 +static int get_frame_from_fpga_finish(void)
1564 + dma_sync_single_for_cpu(NULL, (*rx_rb.rbe)[rx_rb_write].dma_addr, SZ_2K, DMA_FROM_DEVICE);
1566 + (*rx_rb.rbi)[rx_rb_write].flags = RB_USER;
1568 + if (rx_rb_write == rb_size.num_rx_frames)
1571 + wake_up_interruptible(&data_received_queue);
1573 + rx_dma_active = 0;
1575 + get_frame_from_fpga_start();
1578 + gpio_set_value(14, 0);
1584 +static int send_frame_to_fpga_start(void)
1586 + struct usrp_e_dev *p = usrp_e_devp;
1587 + struct ring_buffer_info *rbi = &(*tx_rb.rbi)[tx_rb_read];
1588 + struct ring_buffer_entry *rbe = &(*tx_rb.rbe)[tx_rb_read];
1589 + u16 elements_to_write;
1591 +// printk("In send_frame_to_fpga_start.\n");
1593 + /* Check if there is data to write to the FPGA, if so send it */
1594 + /* Otherwise, do nothing. Process is restarted by calls to write */
1595 + if (((*tx_rb.rbi)[tx_rb_read].flags & RB_USER) && !tx_dma_active && (gpio_get_value(TX_SPACE_AVAILABLE_GPIO)) && !shutting_down) {
1596 +// printk("In send_frame_to_fpga_start, past if.\n");
1597 + tx_dma_active = 1;
1599 + gpio_set_value(14, 1);
1602 + elements_to_write = ((rbi->len) >> 1);
1604 + writew(elements_to_write, p->ctl_addr + UE_REG_MISC_TX_LEN);
1606 + rbi->flags = RB_DMA_ACTIVE;
1608 + omap_set_dma_src_addr_size(tx_dma->ch, rbe->dma_addr,
1609 + elements_to_write);
1611 + dma_sync_single_for_device(NULL, rbe->dma_addr, SZ_2K, DMA_TO_DEVICE);
1613 + omap_start_dma(tx_dma->ch);
1619 +static int send_frame_to_fpga_finish(void)
1622 + dma_sync_single_for_cpu(NULL, (*tx_rb.rbe)[tx_rb_read].dma_addr, SZ_2K, DMA_TO_DEVICE);
1624 + (*tx_rb.rbi)[tx_rb_read].flags = RB_KERNEL;
1627 + if (tx_rb_read == rb_size.num_tx_frames)
1630 + wake_up_interruptible(&tx_rb_space_available);
1632 + tx_dma_active = 0;
1634 + send_frame_to_fpga_start();
1637 + gpio_set_value(14, 0);
1643 +static int alloc_ring_buffer(struct ring_buffer *rb,
1644 + unsigned int num_bufs, enum dma_data_direction direction)
1648 + rb->rbi = (struct ring_buffer_info (*)[]) __get_free_page(GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN);
1650 + rb->rbe = kzalloc(sizeof(struct ring_buffer_entry) * num_bufs, GFP_KERNEL);
1652 + printk(KERN_ERR "Failed to allocate memory for rb entries\n");
1656 + rb->num_pages = (num_bufs & 1) ? ((num_bufs + 1) / 2) : (num_bufs / 2);
1658 + rb->pages = kzalloc(sizeof(unsigned long) * rb->num_pages, GFP_KERNEL);
1659 + if (!(rb->pages)) {
1660 + printk(KERN_ERR "Failed to allocate memory for rb page entries\n");
1664 + for (i = 0; i < rb->num_pages; i++) {
1665 + (*rb->pages)[i] = __get_free_page(GFP_KERNEL | __GFP_DMA | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN);
1667 + (*(rb->rbe))[i*2].frame_addr =
1668 + (*(rb->pages))[i];
1669 + (*(rb->rbe))[i*2 + 1].frame_addr =
1670 + ((*(rb->pages))[i] + SZ_2K);
1671 + if (!(*(rb->rbe))[i*2].frame_addr || !(*(rb->rbe))[i*2 + 1].frame_addr) {
1672 + printk(KERN_ERR "Failed to allocate memory dma buf\n");
1676 + (*(rb->rbe))[i*2].dma_addr = dma_map_single(NULL, (*(rb->rbe))[i*2].frame_addr, SZ_2K, direction);
1677 + (*(rb->rbe))[i*2 + 1].dma_addr = dma_map_single(NULL, (*(rb->rbe))[i*2 + 1].frame_addr, SZ_2K, direction);
1678 + if (!(*(rb->rbe))[i*2].dma_addr || !(*(rb->rbe))[i*2 + 1].dma_addr) {
1679 + printk(KERN_ERR "Failed to get physical address for dma buf\n");
1687 +static void delete_ring_buffer(struct ring_buffer *rb,
1688 + unsigned int num_bufs, enum dma_data_direction direction)
1691 + unsigned int num_pages;
1693 + printk(KERN_DEBUG "Entering delete_ring_buffer\n");
1695 + num_pages = (num_bufs & 1) ? ((num_bufs + 1) / 2) : (num_bufs / 2);
1697 + for (i = 0; i < num_pages; i++) {
1698 + dma_unmap_single(NULL, (*rb->rbe)[i*2].dma_addr, SZ_2K, direction);
1699 + dma_unmap_single(NULL, (*rb->rbe)[i*2 + 1].dma_addr, SZ_2K, direction);
1700 + free_page((*rb->pages)[i]);
1703 + free_page((unsigned long) rb->rbi);
1708 + printk(KERN_DEBUG "Leaving delete_ring_buffer\n");
1711 +static int alloc_ring_buffers(void)
1714 + if (alloc_ring_buffer(&tx_rb, rb_size.num_tx_frames, DMA_TO_DEVICE) < 0)
1716 + if (alloc_ring_buffer(&rx_rb, rb_size.num_rx_frames, DMA_FROM_DEVICE) < 0)
1722 +static void init_ring_buffer(struct ring_buffer *rb, int num_bufs,
1723 + int initial_flags, enum dma_data_direction direction)
1727 + for (i = 0; i < num_bufs; i++) {
1728 + dma_sync_single_for_device(NULL, (*rb->rbe)[i].dma_addr,
1729 + SZ_2K, direction);
1730 + dma_sync_single_for_cpu(NULL, (*rb->rbe)[i].dma_addr,
1731 + SZ_2K, direction);
1732 + (*rb->rbi)[i].flags = initial_flags;
1738 +static int tx_dma_func(void *data)
1741 + struct sched_param s = { .sched_priority = 1};
1743 + printk(KERN_DEBUG "In tx_dma_func\n");
1745 + allow_signal(SIGSTOP);
1747 + sched_setscheduler(current, SCHED_FIFO, &s);
1749 + while (!kthread_should_stop() && !closing_driver) {
1751 + if (!((*tx_rb.rbe)[tx_rb_read].flags & RB_USER)) {
1752 + tx_dma_waiting_for_data = 1;
1753 + ret = wait_event_interruptible(received_data_from_user,
1754 + (*tx_rb.rbe)[tx_rb_read].flags & RB_USER);
1757 + "tx_dma_func received signal %d\n",
1759 + if (closing_driver)
1763 + tx_dma_waiting_for_data = 0;
1766 + if (wait_event_interruptible(space_available_queue,
1767 + gpio_get_value(TX_SPACE_AVAILABLE_GPIO))) {
1768 + printk(KERN_DEBUG "tx_dma received signal waiting for space.\n");
1769 + if (closing_driver)
1773 + if (send_frame_to_fpga(&(*tx_rb.rbe)[tx_rb_read]) != 0) {
1774 + printk(KERN_DEBUG "send_frame received signal.\n");
1775 + if (closing_driver)
1779 + (*tx_rb.rbe)[tx_rb_read].flags = RB_KERNEL;
1782 + if (tx_rb_read == RB_SIZE)
1786 + if (waiting_for_space_in_tx_rb)
1788 + wake_up_interruptible(&tx_rb_space_queue);
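
The read(), write() and poll() handlers above give user space a simple frame-oriented pipe to the FPGA. A hedged sketch of how an application might drive it follows; /dev/usrp_e0 follows from the device_create() call in usrp_e_init() (assuming udev creates the node), while the 2048-byte frame size is an assumption based on the SZ_2K DMA buffers rather than anything the driver exports.

/* Sketch of the frame-based data path exposed by usrp_e.c.
 * The 2048-byte frame size is an assumption based on the SZ_2K buffers. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>

#define USRP_E_FRAME_SIZE 2048	/* assumed, matches SZ_2K in the driver */

int main(void)
{
	char rx_frame[USRP_E_FRAME_SIZE];
	char tx_frame[USRP_E_FRAME_SIZE];
	struct pollfd pfd;
	ssize_t n;
	int fd = open("/dev/usrp_e0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(tx_frame, 0, sizeof(tx_frame));
	pfd.fd = fd;
	pfd.events = POLLIN | POLLOUT;

	/* Wait for room in the tx ring or a frame in the rx ring. */
	if (poll(&pfd, 1, 1000) > 0) {
		if (pfd.revents & POLLOUT)
			write(fd, tx_frame, sizeof(tx_frame));	/* queue one frame for the FPGA */
		if (pfd.revents & POLLIN) {
			n = read(fd, rx_frame, sizeof(rx_frame));	/* fetch one received frame */
			printf("received %zd bytes\n", n);
		}
	}

	close(fd);
	return 0;
}
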
1794 diff --git a/include/linux/usrp_e.h b/include/linux/usrp_e.h
1795 new file mode 100644
1796 index 0000000..e52f709
1798 +++ b/include/linux/usrp_e.h
1802 + * Copyright (C) 2010 Ettus Research, LLC
1804 + * Written by Philip Balister <philip@opensdr.com>
1806 + * This program is free software; you can redistribute it and/or modify
1807 + * it under the terms of the GNU General Public License as published by
1808 + * the Free Software Foundation; either version 2 of the License, or
1809 + * (at your option) any later version.
1815 +#include <linux/types.h>
1816 +#include <linux/ioctl.h>
1818 +struct usrp_e_ctl16 {
1824 +struct usrp_e_ctl32 {
1830 +/* SPI interface */
1832 +#define UE_SPI_TXONLY 0
1833 +#define UE_SPI_TXRX 1
1835 +/* Defines for spi ctrl register */
1836 +#define UE_SPI_CTRL_TXNEG (BIT(10))
1837 +#define UE_SPI_CTRL_RXNEG (BIT(9))
1839 +#define UE_SPI_PUSH_RISE 0
1840 +#define UE_SPI_PUSH_FALL UE_SPI_CTRL_TXNEG
1841 +#define UE_SPI_LATCH_RISE 0
1842 +#define UE_SPI_LATCH_FALL UE_SPI_CTRL_RXNEG
1844 +struct usrp_e_spi {
1852 +struct usrp_e_i2c {
1858 +#define USRP_E_IOC_MAGIC 'u'
1859 +#define USRP_E_WRITE_CTL16 _IOW(USRP_E_IOC_MAGIC, 0x20, struct usrp_e_ctl16)
1860 +#define USRP_E_READ_CTL16 _IOWR(USRP_E_IOC_MAGIC, 0x21, struct usrp_e_ctl16)
1861 +#define USRP_E_WRITE_CTL32 _IOW(USRP_E_IOC_MAGIC, 0x22, struct usrp_e_ctl32)
1862 +#define USRP_E_READ_CTL32 _IOWR(USRP_E_IOC_MAGIC, 0x23, struct usrp_e_ctl32)
1863 +#define USRP_E_SPI _IOWR(USRP_E_IOC_MAGIC, 0x24, struct usrp_e_spi)
1864 +#define USRP_E_I2C_READ _IOWR(USRP_E_IOC_MAGIC, 0x25, struct usrp_e_i2c)
1865 +#define USRP_E_I2C_WRITE _IOW(USRP_E_IOC_MAGIC, 0x26, struct usrp_e_i2c)
1866 +#define USRP_E_GET_RB_INFO _IOR(USRP_E_IOC_MAGIC, 0x27, struct usrp_e_ring_buffer_size_t)
1869 +#define RB_USER (BIT(0))
1870 +#define RB_KERNEL (BIT(1))
1871 +#define RB_OVERRUN (BIT(2))
1872 +#define RB_DMA_ACTIVE (BIT(3))
1874 +struct ring_buffer_info {
1879 +struct usrp_e_ring_buffer_size_t {
1880 + int num_pages_rx_flags;
1881 + int num_rx_frames;
1882 + int num_pages_tx_flags;
1883 + int num_tx_frames;
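
To round out the interface, the sketch below shows how a user-space process might query the ring-buffer geometry with USRP_E_GET_RB_INFO and map the rings, mirroring the expected_size check in usrp_e_mmap() above. It assumes the new include/linux/usrp_e.h is visible to user space and that the device node is /dev/usrp_e0; the page size is taken from sysconf() at run time.

/* Sketch: query ring-buffer geometry and mmap() the rings, mirroring the
 * expected_size computation in usrp_e_mmap() (half a page per frame plus
 * one page per block of flags). */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/usrp_e.h>

int main(void)
{
	struct usrp_e_ring_buffer_size_t rb;
	size_t page = sysconf(_SC_PAGESIZE);
	size_t map_size;
	void *ring;
	int fd = open("/dev/usrp_e0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, USRP_E_GET_RB_INFO, &rb) < 0) {
		perror("USRP_E_GET_RB_INFO");
		return 1;
	}

	map_size = (rb.num_rx_frames + rb.num_tx_frames) * (page / 2)
		 + (rb.num_pages_rx_flags + rb.num_pages_tx_flags) * page;

	ring = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("mapped %zu bytes of ring buffers\n", map_size);
	munmap(ring, map_size);
	close(fd);
	return 0;
}
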