drivers/tty/tty_buffer.c
/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>

/**
 *      tty_buffer_free_all             -       free buffers used by a tty
 *      @tty: tty to free from
 *
 *      Remove all the buffers pending on a tty whether queued with data
 *      or in the free ring. Must be called when the tty is no longer in use
 *
 *      Locking: none
 */

void tty_buffer_free_all(struct tty_struct *tty)
{
        struct tty_buffer *thead;
        while ((thead = tty->buf.head) != NULL) {
                tty->buf.head = thead->next;
                kfree(thead);
        }
        while ((thead = tty->buf.free) != NULL) {
                tty->buf.free = thead->next;
                kfree(thead);
        }
        tty->buf.tail = NULL;
        tty->buf.memory_used = 0;
}

/**
 *      tty_buffer_alloc        -       allocate a tty buffer
 *      @tty: tty device
 *      @size: desired size (characters)
 *
 *      Allocate a new tty buffer to hold the desired number of characters.
 *      Return NULL if out of memory or if the allocation would exceed the
 *      per-device queue limit.
 *
 *      Locking: Caller must hold tty->buf.lock
 */

static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
{
        struct tty_buffer *p;

        if (tty->buf.memory_used + size > 65536)
                return NULL;
        p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
        if (p == NULL)
                return NULL;
        p->used = 0;
        p->size = size;
        p->next = NULL;
        p->commit = 0;
        p->read = 0;
        p->char_buf_ptr = (char *)(p->data);
        p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
        tty->buf.memory_used += size;
        return p;
}

/**
 *      tty_buffer_free         -       free a tty buffer
 *      @tty: tty owning the buffer
 *      @b: the buffer to free
 *
 *      Free a tty buffer, or add it to the free list according to our
 *      internal strategy
 *
 *      Locking: Caller must hold tty->buf.lock
 */

static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
{
        /* Dumb strategy for now - should keep some stats */
        tty->buf.memory_used -= b->size;
        WARN_ON(tty->buf.memory_used < 0);

        if (b->size >= 512)
                kfree(b);
        else {
                b->next = tty->buf.free;
                tty->buf.free = b;
        }
}

/**
 *      __tty_buffer_flush              -       flush full tty buffers
 *      @tty: tty to flush
 *
 *      flush all the buffers containing receive data. Caller must
 *      hold the buffer lock and must have ensured no parallel flush to
 *      ldisc is running.
 *
 *      Locking: Caller must hold tty->buf.lock
 */

static void __tty_buffer_flush(struct tty_struct *tty)
{
        struct tty_buffer *thead;

        while ((thead = tty->buf.head) != NULL) {
                tty->buf.head = thead->next;
                tty_buffer_free(tty, thead);
        }
        tty->buf.tail = NULL;
}

/**
 *      tty_buffer_flush                -       flush full tty buffers
 *      @tty: tty to flush
 *
 *      flush all the buffers containing receive data. If the buffer is
 *      being processed by flush_to_ldisc then we defer the processing
 *      to that function
 *
 *      Locking: none
 */

void tty_buffer_flush(struct tty_struct *tty)
{
        unsigned long flags;
        spin_lock_irqsave(&tty->buf.lock, flags);

        /* If the data is being pushed to the tty layer then we can't
           process it here. Instead set a flag and the flush_to_ldisc
           path will process the flush request before it exits */
        if (test_bit(TTY_FLUSHING, &tty->flags)) {
                set_bit(TTY_FLUSHPENDING, &tty->flags);
                spin_unlock_irqrestore(&tty->buf.lock, flags);
                wait_event(tty->read_wait,
                                test_bit(TTY_FLUSHPENDING, &tty->flags) == 0);
                return;
        } else
                __tty_buffer_flush(tty);
        spin_unlock_irqrestore(&tty->buf.lock, flags);
}

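/*
 * Editor's note (illustration, not part of the original file): the typical
 * caller is the tty core when queued input has to be discarded, usually
 * together with the line discipline's own flush_buffer() hook.  A rough
 * sketch, using a hypothetical helper name:
 *
 *      static void example_discard_input(struct tty_struct *tty,
 *                                        struct tty_ldisc *ld)
 *      {
 *              if (ld->ops->flush_buffer)
 *                      ld->ops->flush_buffer(tty);     // ldisc-side data
 *              tty_buffer_flush(tty);                  // driver-side queue
 *      }
 *
 * If flush_to_ldisc() is currently running, the call above only sets
 * TTY_FLUSHPENDING and sleeps on tty->read_wait until that worker has
 * emptied the queue on our behalf.
 */
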
/**
 *      tty_buffer_find         -       find a free tty buffer
 *      @tty: tty owning the buffer
 *      @size: characters wanted
 *
 *      Locate an existing suitable tty buffer or if we are lacking one then
 *      allocate a new one. We round our buffers off in 256 character chunks
 *      to get better allocation behaviour.
 *
 *      Locking: Caller must hold tty->buf.lock
 */

static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
{
        struct tty_buffer **tbh = &tty->buf.free;
        while ((*tbh) != NULL) {
                struct tty_buffer *t = *tbh;
                if (t->size >= size) {
                        *tbh = t->next;
                        t->next = NULL;
                        t->used = 0;
                        t->commit = 0;
                        t->read = 0;
                        tty->buf.memory_used += t->size;
                        return t;
                }
                tbh = &((*tbh)->next);
        }
        /* Round the buffer size out */
        size = (size + 0xFF) & ~0xFF;
        return tty_buffer_alloc(tty, size);
        /* Should possibly check if this fails for the largest buffer we
           have queued and recycle that ? */
}

/**
 *      tty_buffer_request_room         -       grow tty buffer if needed
 *      @tty: tty structure
 *      @size: size desired
 *
 *      Make at least size bytes of linear space available for the tty
 *      buffer. If we fail return the size we managed to find.
 *
 *      Locking: Takes tty->buf.lock
 */
int tty_buffer_request_room(struct tty_struct *tty, size_t size)
{
        struct tty_buffer *b, *n;
        int left;
        unsigned long flags;

        spin_lock_irqsave(&tty->buf.lock, flags);

        /* OPTIMISATION: We could keep a per-tty "zero" sized buffer to
           remove this conditional if it's worth it. This would be invisible
           to the callers */
        if ((b = tty->buf.tail) != NULL)
                left = b->size - b->used;
        else
                left = 0;

        if (left < size) {
                /* This is the slow path - looking for new buffers to use */
                if ((n = tty_buffer_find(tty, size)) != NULL) {
                        if (b != NULL) {
                                b->next = n;
                                b->commit = b->used;
                        } else
                                tty->buf.head = n;
                        tty->buf.tail = n;
                } else
                        size = left;
        }

        spin_unlock_irqrestore(&tty->buf.lock, flags);
        return size;
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);

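/*
 * Editor's note (illustration, not part of the original file): drivers
 * rarely call tty_buffer_request_room() directly; the tty_insert_flip_*
 * helpers below do it for them.  A direct use could look like:
 *
 *      int space = tty_buffer_request_room(tty, len);
 *      // "space" may be smaller than "len" once the 64KB per-tty memory
 *      // limit enforced by tty_buffer_alloc() has been reached.
 */
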
/**
 *      tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
 *      @tty: tty structure
 *      @chars: characters
 *      @flag: flag value for each character
 *      @size: size
 *
 *      Queue a series of bytes to the tty buffering. All the characters
 *      passed are marked with the supplied flag. Returns the number added.
 *
 *      Locking: Called functions may take tty->buf.lock
 */

int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
                const unsigned char *chars, char flag, size_t size)
{
        int copied = 0;
        do {
                int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
                int space = tty_buffer_request_room(tty, goal);
                struct tty_buffer *tb = tty->buf.tail;
                /* If there is no space then tb may be NULL */
                if (unlikely(space == 0))
                        break;
                memcpy(tb->char_buf_ptr + tb->used, chars, space);
                memset(tb->flag_buf_ptr + tb->used, flag, space);
                tb->used += space;
                copied += space;
                chars += space;
                /* There is a small chance that we need to split the data over
                   several buffers. If this is the case we must loop */
        } while (unlikely(size > copied));
        return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);

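/*
 * Editor's example (illustration, not part of the original file): a serial
 * driver's receive path typically queues error-free data with this helper
 * and then pushes it to the line discipline.  "example_rx" and its
 * arguments are made-up names; tty_insert_flip_string() in
 * <linux/tty_flip.h> is the TTY_NORMAL convenience wrapper.
 *
 *      static void example_rx(struct tty_struct *tty,
 *                             const unsigned char *buf, size_t len)
 *      {
 *              // Every byte is good data, so flag them all TTY_NORMAL.
 *              // The return value may be less than len if the per-tty
 *              // buffer limit was hit.
 *              tty_insert_flip_string_fixed_flag(tty, buf, TTY_NORMAL, len);
 *              tty_flip_buffer_push(tty);
 *      }
 */
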
/**
 *      tty_insert_flip_string_flags    -       Add characters to the tty buffer
 *      @tty: tty structure
 *      @chars: characters
 *      @flags: flag bytes
 *      @size: size
 *
 *      Queue a series of bytes to the tty buffering. For each character
 *      the flags array indicates the status of the character. Returns the
 *      number added.
 *
 *      Locking: Called functions may take tty->buf.lock
 */

int tty_insert_flip_string_flags(struct tty_struct *tty,
                const unsigned char *chars, const char *flags, size_t size)
{
        int copied = 0;
        do {
                int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
                int space = tty_buffer_request_room(tty, goal);
                struct tty_buffer *tb = tty->buf.tail;
                /* If there is no space then tb may be NULL */
                if (unlikely(space == 0))
                        break;
                memcpy(tb->char_buf_ptr + tb->used, chars, space);
                memcpy(tb->flag_buf_ptr + tb->used, flags, space);
                tb->used += space;
                copied += space;
                chars += space;
                flags += space;
                /* There is a small chance that we need to split the data over
                   several buffers. If this is the case we must loop */
        } while (unlikely(size > copied));
        return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);

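/*
 * Editor's example (illustration, not part of the original file): useful
 * when the hardware reports per-character status alongside the data, for
 * instance a framing error on one byte of a pair:
 *
 *      unsigned char data[2] = { 'A', 'B' };
 *      char flgs[2] = { TTY_NORMAL, TTY_FRAME };
 *
 *      tty_insert_flip_string_flags(tty, data, flgs, 2);
 *      tty_flip_buffer_push(tty);
 */
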
/**
 *      tty_schedule_flip       -       push characters to ldisc
 *      @tty: tty to push from
 *
 *      Takes any pending buffers and transfers their ownership to the
 *      ldisc side of the queue. It then schedules those characters for
 *      processing by the line discipline.
 *
 *      Locking: Takes tty->buf.lock
 */

void tty_schedule_flip(struct tty_struct *tty)
{
        unsigned long flags;
        spin_lock_irqsave(&tty->buf.lock, flags);
        if (tty->buf.tail != NULL)
                tty->buf.tail->commit = tty->buf.tail->used;
        spin_unlock_irqrestore(&tty->buf.lock, flags);
        schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_schedule_flip);

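/*
 * Editor's note (illustration, not part of the original file): this is the
 * lightweight alternative to tty_flip_buffer_push() for callers that never
 * want the synchronous low_latency path, e.g. code feeding a character at
 * a time from a context that must not call into the ldisc directly:
 *
 *      tty_insert_flip_char(tty, ch, TTY_NORMAL);
 *      tty_schedule_flip(tty);
 */
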
/**
 *      tty_prepare_flip_string         -       make room for characters
 *      @tty: tty
 *      @chars: return pointer for character write area
 *      @size: desired size
 *
 *      Prepare a block of space in the buffer for data. Returns the length
 *      available and buffer pointer to the space which is now allocated and
 *      accounted for as ready for normal characters. This is used for drivers
 *      that need their own block copy routines into the buffer. There is no
 *      guarantee the buffer is a DMA target!
 *
 *      Locking: May call functions taking tty->buf.lock
 */

int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
                                                                size_t size)
{
        int space = tty_buffer_request_room(tty, size);
        if (likely(space)) {
                struct tty_buffer *tb = tty->buf.tail;
                *chars = tb->char_buf_ptr + tb->used;
                memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
                tb->used += space;
        }
        return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);

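/*
 * Editor's example (illustration, not part of the original file): intended
 * for drivers that copy (or have hardware copy) a block straight into the
 * tty buffer instead of going through tty_insert_flip_string().  The
 * driver-side names below ("example_hw_read", "port", "bytes_pending") are
 * hypothetical:
 *
 *      unsigned char *p;
 *      int len = tty_prepare_flip_string(tty, &p, bytes_pending);
 *
 *      if (len > 0) {
 *              example_hw_read(port, p, len);  // fill the reserved area
 *              tty_flip_buffer_push(tty);
 *      }
 *
 * The flag bytes for the reserved area are already set to TTY_NORMAL, so
 * only the character data needs to be written.
 */
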
/**
 *      tty_prepare_flip_string_flags   -       make room for characters
 *      @tty: tty
 *      @chars: return pointer for character write area
 *      @flags: return pointer for status flag write area
 *      @size: desired size
 *
 *      Prepare a block of space in the buffer for data. Returns the length
 *      available and buffer pointer to the space which is now allocated and
 *      accounted for as ready for characters. This is used for drivers
 *      that need their own block copy routines into the buffer. There is no
 *      guarantee the buffer is a DMA target!
 *
 *      Locking: May call functions taking tty->buf.lock
 */

int tty_prepare_flip_string_flags(struct tty_struct *tty,
                        unsigned char **chars, char **flags, size_t size)
{
        int space = tty_buffer_request_room(tty, size);
        if (likely(space)) {
                struct tty_buffer *tb = tty->buf.tail;
                *chars = tb->char_buf_ptr + tb->used;
                *flags = tb->flag_buf_ptr + tb->used;
                tb->used += space;
        }
        return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);

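/*
 * Editor's example (illustration, not part of the original file): like
 * tty_prepare_flip_string(), but the caller also fills in one flag byte
 * per character.  The driver-side names below are hypothetical:
 *
 *      unsigned char *data;
 *      char *fl;
 *      int len = tty_prepare_flip_string_flags(tty, &data, &fl, count);
 *
 *      if (len > 0) {
 *              example_copy_rx(port, data, fl, len);   // data + status
 *              tty_flip_buffer_push(tty);
 *      }
 */
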
/**
 *      flush_to_ldisc
 *      @work: tty structure passed from work queue.
 *
 *      This routine is called out of the software interrupt to flush data
 *      from the buffer chain to the line discipline.
 *
 *      Locking: holds tty->buf.lock to guard buffer list. Drops the lock
 *      while invoking the line discipline receive_buf method. The
 *      receive_buf method is single threaded for each tty instance.
 */

static void flush_to_ldisc(struct work_struct *work)
{
        struct tty_struct *tty =
                container_of(work, struct tty_struct, buf.work);
        unsigned long   flags;
        struct tty_ldisc *disc;

        disc = tty_ldisc_ref(tty);
        if (disc == NULL)       /*  !TTY_LDISC */
                return;

        spin_lock_irqsave(&tty->buf.lock, flags);

        if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
                struct tty_buffer *head, *tail = tty->buf.tail;
                int seen_tail = 0;
                while ((head = tty->buf.head) != NULL) {
                        int copied;
                        int count;
                        char *char_buf;
                        unsigned char *flag_buf;

                        count = head->commit - head->read;
                        if (!count) {
                                if (head->next == NULL)
                                        break;
                                /*
                                 * There's a possibility the tty might get a
                                 * new buffer added during the unlock window
                                 * below. We could end up spinning in here
                                 * forever, hogging the CPU completely. To
                                 * avoid this, let's have a rest each time we
                                 * have processed the tail buffer.
                                 */
                                if (tail == head)
                                        seen_tail = 1;
                                tty->buf.head = head->next;
                                tty_buffer_free(tty, head);
                                continue;
                        }
                        /* The ldisc or a user is trying to flush the buffers
                           we are feeding to the ldisc; stop feeding the line
                           discipline, as we want to empty the queue */
                        if (test_bit(TTY_FLUSHPENDING, &tty->flags))
                                break;
                        char_buf = head->char_buf_ptr + head->read;
                        flag_buf = head->flag_buf_ptr + head->read;
                        spin_unlock_irqrestore(&tty->buf.lock, flags);
                        copied = disc->ops->receive_buf(tty, char_buf,
                                                        flag_buf, count);
                        spin_lock_irqsave(&tty->buf.lock, flags);

                        head->read += copied;

                        if (copied == 0 || seen_tail) {
                                schedule_work(&tty->buf.work);
                                break;
                        }
                }
                clear_bit(TTY_FLUSHING, &tty->flags);
        }

        /* We may have a deferred request to flush the input buffer;
           if so, pull the chain under the lock and empty the queue */
        if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
                __tty_buffer_flush(tty);
                clear_bit(TTY_FLUSHPENDING, &tty->flags);
                wake_up(&tty->read_wait);
        }
        spin_unlock_irqrestore(&tty->buf.lock, flags);

        tty_ldisc_deref(disc);
}

/**
 *      tty_flush_to_ldisc
 *      @tty: tty to push
 *
 *      Push the terminal flip buffers to the line discipline.
 *
 *      Must not be called from IRQ context.
 */
void tty_flush_to_ldisc(struct tty_struct *tty)
{
        flush_work(&tty->buf.work);
}

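/*
 * Editor's note (illustration, not part of the original file): flush_work()
 * waits for an outstanding flush_to_ldisc() run, so a caller that needs
 * already-queued input to have reached the ldisc before it proceeds can do
 * something like:
 *
 *      tty_flip_buffer_push(tty);      // commit and queue the work
 *      tty_flush_to_ldisc(tty);        // wait until the worker has run
 *
 * Because it may sleep, this must not be used from IRQ or atomic context.
 */
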
/**
 *      tty_flip_buffer_push    -       push terminal flip buffers
 *      @tty: tty to push
 *
 *      Queue a push of the terminal flip buffers to the line discipline. This
 *      function must not be called from IRQ context if tty->low_latency is set.
 *
 *      In the event of the queue being busy for flipping, the work will be
 *      held off and retried later.
 *
 *      Locking: tty buffer lock. Driver locks in low latency mode.
 */

void tty_flip_buffer_push(struct tty_struct *tty)
{
        unsigned long flags;
        spin_lock_irqsave(&tty->buf.lock, flags);
        if (tty->buf.tail != NULL)
                tty->buf.tail->commit = tty->buf.tail->used;
        spin_unlock_irqrestore(&tty->buf.lock, flags);

        if (tty->low_latency)
                flush_to_ldisc(&tty->buf.work);
        else
                schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);

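/*
 * Editor's example (illustration, not part of the original file): this is
 * the usual "end of receive" call in a driver's interrupt handler, after
 * one or more tty_insert_flip_* calls.  "example_*" and struct example_port
 * are hypothetical:
 *
 *      static irqreturn_t example_irq(int irq, void *dev_id)
 *      {
 *              struct example_port *port = dev_id;
 *              struct tty_struct *tty = port->tty;
 *
 *              while (example_rx_ready(port))
 *                      tty_insert_flip_char(tty, example_rx_byte(port),
 *                                           TTY_NORMAL);
 *              tty_flip_buffer_push(tty);      // hand the data over
 *              return IRQ_HANDLED;
 *      }
 *
 * Per the warning above, a driver that sets tty->low_latency must instead
 * push from non-IRQ context, since the push then calls the ldisc directly.
 */
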
/**
 *      tty_buffer_init         -       prepare a tty buffer structure
 *      @tty: tty to initialise
 *
 *      Set up the initial state of the buffer management for a tty device.
 *      Must be called before the other tty buffer functions are used.
 *
 *      Locking: none
 */

void tty_buffer_init(struct tty_struct *tty)
{
        spin_lock_init(&tty->buf.lock);
        tty->buf.head = NULL;
        tty->buf.tail = NULL;
        tty->buf.free = NULL;
        tty->buf.memory_used = 0;
        INIT_WORK(&tty->buf.work, flush_to_ldisc);
}

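/*
 * Editor's note (illustration, not part of the original file): the tty core
 * calls tty_buffer_init() once while setting up a tty_struct, before any
 * driver can queue data.  Roughly:
 *
 *      void example_tty_setup(struct tty_struct *tty)
 *      {
 *              tty_buffer_init(tty);   // empty lists, zero accounting,
 *                                      // INIT_WORK for flush_to_ldisc
 *              // ... remaining tty_struct initialisation ...
 *      }
 */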