/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_headers.h"
#include "kfd_pm4_opcodes.h"

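/*
 * Advance the runlist write pointer (kept in dwords) by increment_bytes,
 * BUGging out if the write would run past the end of the buffer.
 */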
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
	*wptr = temp;
}

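/*
 * Build a PM4 type-3 packet header for the given opcode. The count field
 * holds the packet size in dwords minus two, as the PM4 format requires.
 */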
static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
	union PM4_MES_TYPE_3_HEADER header;

	header.u32all = 0;
	header.opcode = opcode;
	header.count = packet_size / sizeof(uint32_t) - 2;
	header.type = PM4_TYPE_3;

	return header.u32all;
}

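/*
 * Compute the runlist IB size: one map_process packet per process plus one
 * map_queues packet per queue. With more than one process, or more queues
 * than the CP can schedule across its pipes, the runlist is over-subscribed
 * and space is reserved for a chained runlist packet at the end of the IB.
 */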
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count;

	BUG_ON(!pm || !rlib_size || !over_subscription);

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;

	/* check if there is over subscription */
	*over_subscription = false;
	if ((process_count > 1) ||
		queue_count > PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE) {
		*over_subscription = true;
		pr_debug("kfd: over subscribed runlist\n");
	}

	/* calculate run list ib allocation size */
	*rlib_size = process_count * sizeof(struct pm4_map_process) +
		     queue_count * sizeof(struct pm4_map_queues);

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += sizeof(struct pm4_runlist);

	pr_debug("kfd: runlist ib size %d\n", *rlib_size);
}

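/*
 * Allocate a zeroed, write-combined buffer for the runlist IB through the
 * kfd2kgd interface and return its CPU and GPU addresses.
 */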
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	BUG_ON(!pm);
	BUG_ON(pm->allocated);
	BUG_ON(!is_over_subscription);

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	retval = kfd2kgd->allocate_mem(pm->dqm->dev->kgd,
					*rl_buffer_size,
					PAGE_SIZE,
					KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
					(struct kgd_mem **) &pm->ib_buffer_obj);

	if (retval != 0) {
		pr_err("kfd: failed to allocate runlist IB\n");
		return retval;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;
	return retval;
}

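/*
 * Write a PM4 RUN_LIST packet into buffer, pointing the scheduler at an IB
 * of ib_size_in_dwords dwords at GPU address ib. The chain bit makes the
 * scheduler continue with the referenced runlist instead of ending there.
 */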
static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_runlist *packet;

	BUG_ON(!pm || !buffer || !ib);

	packet = (struct pm4_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_runlist));
	packet->header.u32all = build_pm4_header(IT_RUN_LIST,
						sizeof(struct pm4_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.valid = 1;
	packet->ordinal2 = lower_32_bits(ib);
	packet->bitfields3.ib_base_hi = upper_32_bits(ib);

	return 0;
}

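/*
 * Write a PM4 MAP_PROCESS packet describing one process to the hardware
 * scheduler: PASID, page table base, shader memory apertures, GDS/GWS/OAC
 * allocations and the number of queues the process owns.
 */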
static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
				struct qcm_process_device *qpd)
{
	struct pm4_map_process *packet;
	struct queue *cur;
	uint32_t num_queues;

	BUG_ON(!pm || !buffer || !qpd);

	packet = (struct pm4_map_process *)buffer;

	pr_debug("kfd: In func %s\n", __func__);

	memset(buffer, 0, sizeof(struct pm4_map_process));

	packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 1;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields3.page_table_base = qpd->page_table_base;
	packet->bitfields10.gds_size = qpd->gds_size;
	packet->bitfields10.num_gws = qpd->num_gws;
	packet->bitfields10.num_oac = qpd->num_oac;
	num_queues = 0;
	list_for_each_entry(cur, &qpd->queues_list, list)
		num_queues++;
	packet->bitfields10.num_queues = num_queues;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	return 0;
}

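/*
 * Write a PM4 MAP_QUEUES packet for a single queue, selecting the compute
 * or SDMA engine from the queue type and handing the scheduler the queue's
 * doorbell offset, MQD address and write-pointer address.
 */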
static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
				struct queue *q)
{
	struct pm4_map_queues *packet;

	BUG_ON(!pm || !buffer || !q);

	pr_debug("kfd: In func %s\n", __func__);

	packet = (struct pm4_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_map_queues));

	packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
						sizeof(struct pm4_map_queues));
	packet->bitfields2.alloc_format =
				alloc_format__mes_map_queues__one_per_pipe;
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots;

	packet->bitfields2.vidmem = (q->properties.is_interop) ?
			vidmem__mes_map_queues__uses_video_memory :
			vidmem__mes_map_queues__uses_no_video_memory;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
				engine_sel__mes_map_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
				engine_sel__mes_map_queues__sdma0;
		break;
	default:
		BUG();
		break;
	}

	packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mes_map_queues_ordinals[0].mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mes_map_queues_ordinals[0].mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->mes_map_queues_ordinals[0].wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->mes_map_queues_ordinals[0].wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}

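/*
 * Build the complete runlist IB: for each process on the dqm list, a
 * map_process packet followed by map_queues packets for its active kernel
 * and user queues. When over-subscribed, a chained runlist packet pointing
 * back to the start of this IB is appended so the scheduler keeps cycling
 * through it.
 */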
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval != 0)
		return retval;

	*rl_size_bytes = alloc_size_bytes;

	pr_debug("kfd: In func %s\n", __func__);
	pr_debug("kfd: building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("kfd: not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}
		retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval != 0)
			return retval;
		processes_mapped++;
		inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;
			retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
							kq->queue);
			if (retval != 0)
				return retval;
			inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
					alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;
			retval = pm_create_map_queue(pm,
						&rl_buffer[rl_wptr], q);
			if (retval != 0)
				return retval;
			inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
					alloc_size_bytes);
		}
	}

	pr_debug("kfd: finished map process and queues to runlist\n");

	if (is_over_subscription)
		pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
				alloc_size_bytes / sizeof(uint32_t), true);

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return 0;
}

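/*
 * Initialize the packet manager: bind it to the device queue manager and
 * create the HIQ kernel queue over which all PM4 packets are submitted.
 */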
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (pm->priv_queue == NULL) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

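/* Tear down the packet manager's lock and its HIQ kernel queue. */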
void pm_uninit(struct packet_manager *pm)
{
	BUG_ON(!pm);

	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}

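/*
 * Send a SET_RESOURCES packet on the HIQ, handing the hardware scheduler
 * the VMID and queue masks, unmap latency, GDS heap and GWS/OAC resources
 * it may use for scheduling.
 */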
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	/*
	 * NULL-initialize so that a failed acquire_packet_buffer() that
	 * leaves the output pointer untouched is still caught below.
	 */
	struct pm4_set_resources *packet = NULL;

	BUG_ON(!pm || !res);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&pm->lock);
	pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
					sizeof(*packet) / sizeof(uint32_t),
					(unsigned int **)&packet);
	if (packet == NULL) {
		mutex_unlock(&pm->lock);
		pr_err("kfd: failed to allocate buffer on kernel queue\n");
		return -ENOMEM;
	}

	memset(packet, 0, sizeof(struct pm4_set_resources));
	packet->header.u32all = build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_set_resources));

	packet->bitfields2.queue_type =
			queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	pm->priv_queue->submit_packet(pm->priv_queue);
	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);

	mutex_unlock(&pm->lock);

	return 0;
}

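/*
 * Build a runlist IB for dqm_queues and submit a RUN_LIST packet on the
 * HIQ pointing the scheduler at it. On failure the HIQ packet is rolled
 * back and the IB, if it was allocated, is released.
 */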
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	BUG_ON(!pm || !dqm_queues);

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval != 0)
		goto fail_create_runlist_ib;

	pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval != 0)
		goto fail_acquire_packet_buffer;

	retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval != 0)
		goto fail_create_runlist;

	pm->priv_queue->submit_packet(pm->priv_queue);
	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	if (pm->allocated)
		pm_release_ib(pm);
	return retval;
}

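/*
 * Submit a QUERY_STATUS packet asking the scheduler to write fence_value
 * to fence_address once preceding work has been acknowledged; callers use
 * this as a fence to wait on.
 */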
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	int retval;
	struct pm4_query_status *packet;

	BUG_ON(!pm || !fence_address);

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_query_status) / sizeof(uint32_t),
			(unsigned int **)&packet);
	if (retval != 0)
		goto fail_acquire_packet_buffer;

	packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	pm->priv_queue->submit_packet(pm->priv_queue);
	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
	mutex_unlock(&pm->lock);

	return 0;

fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}

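/*
 * Submit an UNMAP_QUEUES packet that preempts (or, when reset is set,
 * resets) queues on the chosen engine. The filter mode selects a single
 * queue by doorbell offset, every queue of one PASID, or all active
 * queues.
 */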
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_preempt_type_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	int retval;
	uint32_t *buffer;
	struct pm4_unmap_queues *packet;

	BUG_ON(!pm);

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
			&buffer);
	if (retval != 0)
		goto err_acquire_packet_buffer;

	packet = (struct pm4_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_unmap_queues));

	packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_unmap_queues));
	switch (type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
		break;
	default:
		BUG();
		break;
	}

	if (reset)
		packet->bitfields2.action =
				action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
				action__mes_unmap_queues__preempt_queues;

	switch (mode) {
	case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
		packet->bitfields2.queue_sel =
				queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
		packet->bitfields2.num_queues = 1;
		packet->bitfields3b.doorbell_offset0 = filter_param;
		break;
	case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
				queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
				queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
		break;
	default:
		BUG();
		break;
	}

	pm->priv_queue->submit_packet(pm->priv_queue);
	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);

	mutex_unlock(&pm->lock);
	return 0;

err_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}

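/* Free the runlist IB through the kfd2kgd interface, if one is allocated. */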
void pm_release_ib(struct packet_manager *pm)
{
	BUG_ON(!pm);

	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd2kgd->free_mem(pm->dqm->dev->kgd,
				(struct kgd_mem *) pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}