/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
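/*
 * The packet manager builds PM4 packets for the CP hardware scheduler:
 * runlist, map-process and map-queues packets describing which processes
 * and queues to run, plus set-resources, query-status and unmap-queues
 * packets submitted over the HIQ kernel queue.
 */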
#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_headers.h"
#include "kfd_pm4_opcodes.h"
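/*
 * Advance the runlist IB write pointer (counted in dwords) by
 * increment_bytes, asserting the write stays inside the buffer.
 */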
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
	*wptr = temp;
}
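/*
 * Build a PM4 TYPE-3 packet header. The count field encodes the packet
 * body length minus one, i.e. total dwords minus two (one of which is
 * the header itself); e.g. a 6-dword packet gets count = 4.
 */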
static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
	union PM4_MES_TYPE_3_HEADER header;

	header.u32all = 0;
	header.opcode = opcode;
	header.count = packet_size/sizeof(uint32_t) - 2;
	header.type = PM4_TYPE_3;

	return header.u32all;
}
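/*
 * Size the runlist IB: one map-process packet per process plus one
 * map-queues packet per queue. The runlist is over-subscribed when
 * there is more than one process, or more queues than HW scheduling
 * slots; room for a chained runlist packet is then reserved.
 */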
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count;

	BUG_ON(!pm || !rlib_size || !over_subscription);

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;

	/* check if there is over subscription */
	*over_subscription = false;
	if ((process_count > 1) ||
		queue_count > PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE) {
		*over_subscription = true;
		pr_debug("kfd: over subscribed runlist\n");
	}

	/* calculate run list ib allocation size */
	*rlib_size = process_count * sizeof(struct pm4_map_process) +
		queue_count * sizeof(struct pm4_map_queues);

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += sizeof(struct pm4_runlist);

	pr_debug("kfd: runlist ib size %d\n", *rlib_size);
}
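/*
 * Allocate a write-combined buffer for the runlist IB, mapped for both
 * CPU writes and GPU (CP) reads.
 */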
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	BUG_ON(!pm);
	BUG_ON(pm->allocated == true);
	BUG_ON(is_over_subscription == NULL);

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	retval = kfd2kgd->allocate_mem(pm->dqm->dev->kgd,
					*rl_buffer_size,
					PAGE_SIZE,
					KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
					(struct kgd_mem **) &pm->ib_buffer_obj);
	if (retval != 0) {
		pr_err("kfd: failed to allocate runlist IB\n");
		return retval;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

	return retval;
}
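/*
 * Write a RUN_LIST packet pointing the CP at a runlist IB. With chain
 * set, the CP continues fetching from the IB instead of returning,
 * which lets an over-subscribed runlist loop back to its own start.
 */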
static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_runlist *packet;

	BUG_ON(!pm || !buffer || !ib);

	packet = (struct pm4_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_runlist));
	packet->header.u32all = build_pm4_header(IT_RUN_LIST,
						sizeof(struct pm4_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.valid = 1;
	packet->ordinal2 = lower_32_bits(ib);
	packet->bitfields3.ib_base_hi = upper_32_bits(ib);

	return 0;
}
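/*
 * Write a MAP_PROCESS packet carrying the per-process state the
 * scheduler needs: PASID, page table base, shared-memory aperture
 * setup, GDS/GWS/OAC allocations and the process's queue count.
 */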
static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
				struct qcm_process_device *qpd)
{
	struct pm4_map_process *packet;
	struct queue *cur;
	uint32_t num_queues;

	BUG_ON(!pm || !buffer || !qpd);

	packet = (struct pm4_map_process *)buffer;

	pr_debug("kfd: In func %s\n", __func__);

	memset(buffer, 0, sizeof(struct pm4_map_process));

	packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 1;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields3.page_table_base = qpd->page_table_base;
	packet->bitfields10.gds_size = qpd->gds_size;
	packet->bitfields10.num_gws = qpd->num_gws;
	packet->bitfields10.num_oac = qpd->num_oac;

	/* count the queues owned by this process */
	num_queues = 0;
	list_for_each_entry(cur, &qpd->queues_list, list)
		num_queues++;
	packet->bitfields10.num_queues = num_queues;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	return 0;
}
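/*
 * Write a MAP_QUEUES packet for one queue, letting the HW scheduler
 * pick the queue slot; the CP locates the MQD and write pointer via
 * the addresses programmed below.
 */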
static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
				struct queue *q)
{
	struct pm4_map_queues *packet;

	BUG_ON(!pm || !buffer || !q);

	pr_debug("kfd: In func %s\n", __func__);

	packet = (struct pm4_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_map_queues));

	packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
						sizeof(struct pm4_map_queues));
	packet->bitfields2.alloc_format =
				alloc_format__mes_map_queues__one_per_pipe;
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots;

	packet->bitfields2.vidmem = (q->properties.is_interop) ?
			vidmem__mes_map_queues__uses_video_memory :
			vidmem__mes_map_queues__uses_no_video_memory;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
				engine_sel__mes_map_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
				engine_sel__mes_map_queues__sdma0;
		break;
	default:
		BUG();
		break;
	}

	packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mes_map_queues_ordinals[0].mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mes_map_queues_ordinals[0].mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->mes_map_queues_ordinals[0].wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->mes_map_queues_ordinals[0].wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}
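/*
 * Build the complete runlist IB: for every process, a map-process
 * packet followed by map-queues packets for each of its active kernel
 * and user queues. An over-subscribed runlist is terminated with a
 * chained RUN_LIST packet pointing back at the start of the IB.
 */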
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval != 0)
		return retval;

	*rl_size_bytes = alloc_size_bytes;

	pr_debug("kfd: In func %s\n", __func__);
	pr_debug("kfd: building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;

		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("kfd: not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}
		retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval != 0)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (kq->queue->properties.is_active != true)
				continue;
			retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
							kq->queue);
			if (retval != 0)
				return retval;
			inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
					alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (q->properties.is_active != true)
				continue;
			retval = pm_create_map_queue(pm,
						&rl_buffer[rl_wptr], q);
			if (retval != 0)
				return retval;
			inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
					alloc_size_bytes);
		}
	}

	pr_debug("kfd: finished map process and queues to runlist\n");

	if (is_over_subscription)
		pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
				alloc_size_bytes / sizeof(uint32_t), true);

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return 0;
}
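/*
 * Set up the packet manager for a device queue manager, creating the
 * HIQ kernel queue used to talk to the HW scheduler.
 */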
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	BUG_ON(pm == NULL || dqm == NULL);

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (pm->priv_queue == NULL) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}
void pm_uninit(struct packet_manager *pm)
{
	BUG_ON(pm == NULL);

	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}
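/*
 * Send a SET_RESOURCES packet on the HIQ, handing the HW scheduler the
 * VMIDs, queue slots, GDS heap and GWS/OAC resources it may assign to
 * HSA processes.
 */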
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	struct pm4_set_resources *packet;

	BUG_ON(!pm || !res);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&pm->lock);
	pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
					sizeof(*packet) / sizeof(uint32_t),
					(unsigned int **)&packet);
	if (packet == NULL) {
		mutex_unlock(&pm->lock);
		pr_err("kfd: failed to allocate buffer on kernel queue\n");
		return -ENOMEM;
	}

	memset(packet, 0, sizeof(struct pm4_set_resources));
	packet->header.u32all = build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_set_resources));

	packet->bitfields2.queue_type =
			queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	pm->priv_queue->submit_packet(pm->priv_queue);
	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);

	mutex_unlock(&pm->lock);

	return 0;
}
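/*
 * Build a runlist IB covering all of the dqm's queues, then submit a
 * RUN_LIST packet on the HIQ that points the scheduler at it.
 */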
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	BUG_ON(!pm || !dqm_queues);

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval != 0)
		goto fail_create_runlist_ib;

	pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval != 0)
		goto fail_acquire_packet_buffer;

	retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval != 0)
		goto fail_create_runlist;

	pm->priv_queue->submit_packet(pm->priv_queue);
	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	if (pm->allocated == true)
		pm_release_ib(pm);
	return retval;
}
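/*
 * Ask the scheduler to write fence_value to fence_address once prior
 * packets have been acknowledged, so the caller can poll the fence for
 * completion (e.g. of a preemption request).
 */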
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	int retval;
	struct pm4_query_status *packet;

	BUG_ON(!pm || !fence_address);

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_query_status) / sizeof(uint32_t),
			(unsigned int **)&packet);
	if (retval != 0)
		goto fail_acquire_packet_buffer;

	packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	pm->priv_queue->submit_packet(pm->priv_queue);
	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
	mutex_unlock(&pm->lock);

	return 0;

fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}
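/*
 * Send an UNMAP_QUEUES packet that preempts (or, with reset, kills)
 * queues on the chosen engine, filtered to a single doorbell, to all
 * queues of a PASID, or to every active queue.
 */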
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_preempt_type_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	int retval;
	uint32_t *buffer;
	struct pm4_unmap_queues *packet;

	BUG_ON(!pm);

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
			&buffer);
	if (retval != 0)
		goto err_acquire_packet_buffer;

	packet = (struct pm4_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_unmap_queues));

	packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_unmap_queues));

	switch (type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
		break;
	default:
		BUG();
		break;
	}

	if (reset)
		packet->bitfields2.action =
				action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
				action__mes_unmap_queues__preempt_queues;

	switch (mode) {
	case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
		packet->bitfields2.num_queues = 1;
		packet->bitfields3b.doorbell_offset0 = filter_param;
		break;
	case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
		break;
	default:
		BUG();
		break;
	}

	pm->priv_queue->submit_packet(pm->priv_queue);
	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);

	mutex_unlock(&pm->lock);
	return 0;

err_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}
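/* Free the runlist IB, if one is outstanding. */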
void pm_release_ib(struct packet_manager *pm)
{
	BUG_ON(!pm);

	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd2kgd->free_mem(pm->dqm->dev->kgd,
				(struct kgd_mem *) pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}