Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2/* Copyright(c) 2014 - 2020 Intel Corporation */
3#include <linux/mutex.h>
4#include <linux/list.h>
5#include <linux/bitops.h>
6#include <linux/delay.h>
7#include "adf_accel_devices.h"
8#include "adf_cfg.h"
9#include "adf_common_drv.h"
10#include "adf_dbgfs.h"
11#include "adf_heartbeat.h"
12#include "adf_rl.h"
13#include "adf_sysfs_anti_rb.h"
14#include "adf_sysfs_ras_counters.h"
15#include "adf_telemetry.h"
16
/* Global table of registered services; service_lock guards add/remove. */
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
19
/* Append a service handle to the global service table under service_lock. */
static void adf_service_add(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_add(&service->list, &service_table);
	mutex_unlock(&service_lock);
}
26
/**
 * adf_service_register() - Register a service for device event notifications
 * @service: Service handle to register.
 *
 * Clears the per-device init and start status arrays, then adds the service
 * to the global table so it receives ADF_EVENT_* callbacks.
 *
 * Return: 0 (registration cannot fail).
 */
int adf_service_register(struct service_hndl *service)
{
	memset(service->init_status, 0, sizeof(service->init_status));
	memset(service->start_status, 0, sizeof(service->start_status));
	adf_service_add(service);
	return 0;
}
34
/* Unlink a service handle from the global service table under service_lock. */
static void adf_service_remove(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_del(&service->list);
	mutex_unlock(&service_lock);
}
41
42int adf_service_unregister(struct service_hndl *service)
43{
44 int i;
45
46 for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
47 if (service->init_status[i] || service->start_status[i]) {
48 pr_err("QAT: Could not remove active service\n");
49 return -EFAULT;
50 }
51 }
52 adf_service_remove(service);
53 return 0;
54}
55
/**
 * adf_dev_init() - Init data structures and services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Initialize the ring data structures and the admin comms and arbitration
 * services.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_dev_init(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int ret;

	if (!hw_data) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to init device - hw_data not set\n");
		return -EFAULT;
	}

	/* A PF must be configured up front; a VF may self-configure below. */
	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    !accel_dev->is_vf) {
		dev_err(&GET_DEV(accel_dev), "Device not configured\n");
		return -EFAULT;
	}

	if (adf_init_etr_data(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
		return -EFAULT;
	}

	/* Generation-specific hooks are optional; call only those provided. */
	if (hw_data->init_device && hw_data->init_device(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
		return -EFAULT;
	}

	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
		return -EFAULT;
	}

	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
		return -EFAULT;
	}

	if (hw_data->get_ring_to_svc_map)
		hw_data->ring_to_svc_map = hw_data->get_ring_to_svc_map(accel_dev);

	/*
	 * NOTE(review): error paths below do not unwind earlier steps;
	 * presumably the caller tears down partial state via
	 * adf_dev_shutdown() — confirm against callers.
	 */
	if (adf_ae_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to initialise Acceleration Engine\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);

	if (adf_ae_fw_load(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to load acceleration FW\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);

	if (hw_data->alloc_irq(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);

	if (hw_data->ras_ops.enable_ras_errors)
		hw_data->ras_ops.enable_ras_errors(accel_dev);

	hw_data->enable_ints(accel_dev);
	hw_data->enable_error_correction(accel_dev);

	ret = hw_data->pfvf_ops.enable_comms(accel_dev);
	if (ret)
		return ret;

	/* An unconfigured VF configures itself once PF/VF comms are up. */
	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    accel_dev->is_vf) {
		if (qat_crypto_vf_dev_config(accel_dev))
			return -EFAULT;
	}

	/* Rate limiting and telemetry are optional; -EOPNOTSUPP is benign. */
	adf_heartbeat_init(accel_dev);
	ret = adf_rl_init(accel_dev);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	ret = adf_tl_init(accel_dev);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	/*
	 * Subservice initialisation is divided into two stages: init and start.
	 * This is to facilitate any ordering dependencies between services
	 * prior to starting any of the accelerators.
	 */
	list_for_each_entry(service, &service_table, list) {
		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to initialise service %s\n",
				service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, service->init_status);
	}

	return 0;
}
168
/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_dev_start(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	u32 caps;
	int ret;

	set_bit(ADF_STATUS_STARTING, &accel_dev->status);

	if (adf_ae_start(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);

	if (hw_data->send_admin_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
		return -EFAULT;
	}

	/* Optional per-generation hooks below; call only those provided. */
	if (hw_data->measure_clock) {
		ret = hw_data->measure_clock(accel_dev);
		if (ret) {
			dev_err(&GET_DEV(accel_dev), "Failed measure device clock\n");
			return ret;
		}
	}

	/* Set ssm watch dog timer */
	if (hw_data->set_ssm_wdtimer)
		hw_data->set_ssm_wdtimer(accel_dev);

	/* Enable Power Management */
	if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
		return -EFAULT;
	}

	if (hw_data->start_timer) {
		ret = hw_data->start_timer(accel_dev);
		if (ret) {
			dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
			return ret;
		}
	}

	/* Rate limiting and telemetry are optional; -EOPNOTSUPP is benign. */
	adf_heartbeat_start(accel_dev);
	ret = adf_rl_start(accel_dev);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	ret = adf_tl_start(accel_dev);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	/* Second stage of the init/start service handshake; see adf_dev_init. */
	list_for_each_entry(service, &service_table, list) {
		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to start service %s\n",
				service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, service->start_status);
	}

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	set_bit(ADF_STATUS_STARTED, &accel_dev->status);

	/*
	 * Register crypto/compression algorithms only when the corresponding
	 * instance lists are populated. On failure, flip the status bits back
	 * to STARTING so adf_dev_stop() will still run the teardown path.
	 */
	if (!list_empty(&accel_dev->crypto_list) &&
	    (qat_algs_register() || qat_asym_algs_register())) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to register crypto algs\n");
		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
		return -EFAULT;
	}
	set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);

	caps = hw_data->accel_capabilities_ext_mask;
	if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register(caps)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to register compression algs\n");
		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
		return -EFAULT;
	}
	set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);

	adf_dbgfs_add(accel_dev);
	adf_sysfs_start_ras(accel_dev);
	adf_sysfs_start_arb(accel_dev);

	return 0;
}
273
/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shutting down.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	bool wait = false;
	int ret;

	/* Nothing to do unless the device is started or mid-start. */
	if (!adf_dev_started(accel_dev) &&
	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
		return;

	adf_tl_stop(accel_dev);
	adf_rl_stop(accel_dev);
	adf_dbgfs_rm(accel_dev);
	adf_sysfs_stop_ras(accel_dev);
	adf_sysfs_stop_arb(accel_dev);

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);

	/* Unregister algorithms only if they were registered in start. */
	if (!list_empty(&accel_dev->crypto_list) &&
	    test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
		qat_algs_unregister();
		qat_asym_algs_unregister();
	}
	clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);

	if (!list_empty(&accel_dev->compression_list) &&
	    test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
		qat_comp_algs_unregister(hw_data->accel_capabilities_ext_mask);
	clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);

	/*
	 * Notify started services of the stop. A service returning -EAGAIN
	 * needs extra time to quiesce; record that and sleep once below.
	 */
	list_for_each_entry(service, &service_table, list) {
		if (!test_bit(accel_dev->accel_id, service->start_status))
			continue;
		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
		if (!ret) {
			clear_bit(accel_dev->accel_id, service->start_status);
		} else if (ret == -EAGAIN) {
			wait = true;
			clear_bit(accel_dev->accel_id, service->start_status);
		}
	}

	if (hw_data->stop_timer)
		hw_data->stop_timer(accel_dev);

	hw_data->disable_iov(accel_dev);

	/* Grace period for services that answered -EAGAIN above. */
	if (wait)
		msleep(100);

	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
		if (adf_ae_stop(accel_dev))
			dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
		else
			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
	}
}
343
/**
 * adf_dev_shutdown() - shutdown acceleration services and data structures
 * @accel_dev: Pointer to acceleration device
 *
 * Cleanup the ring data structures and the admin comms and arbitration
 * services.
 */
static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;

	if (!hw_data) {
		dev_err(&GET_DEV(accel_dev),
			"QAT: Failed to shutdown device - hw_data not set\n");
		return;
	}

	/* Tear down in reverse of adf_dev_init(); each step is gated on the
	 * status bit that init set, so partial inits unwind cleanly.
	 */
	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
		adf_ae_fw_release(accel_dev);
		clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
	}

	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
		if (adf_ae_shutdown(accel_dev))
			dev_err(&GET_DEV(accel_dev),
				"Failed to shutdown Accel Engine\n");
		else
			clear_bit(ADF_STATUS_AE_INITIALISED,
				  &accel_dev->status);
	}

	/* Notify services that completed ADF_EVENT_INIT for this device. */
	list_for_each_entry(service, &service_table, list) {
		if (!test_bit(accel_dev->accel_id, service->init_status))
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
			dev_err(&GET_DEV(accel_dev),
				"Failed to shutdown service %s\n",
				service->name);
		else
			clear_bit(accel_dev->accel_id, service->init_status);
	}

	adf_rl_exit(accel_dev);

	if (hw_data->ras_ops.disable_ras_errors)
		hw_data->ras_ops.disable_ras_errors(accel_dev);

	adf_heartbeat_shutdown(accel_dev);

	adf_tl_shutdown(accel_dev);

	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
		hw_data->free_irq(accel_dev);
		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
	}

	/* If not restarting, delete all cfg sections except for GENERAL */
	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
		adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC);

	if (hw_data->exit_arb)
		hw_data->exit_arb(accel_dev);

	if (hw_data->exit_admin_comms)
		hw_data->exit_admin_comms(accel_dev);

	adf_cleanup_etr_data(accel_dev);
	adf_misc_wq_flush();
	adf_dev_restore(accel_dev);
}
415
416int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
417{
418 struct service_hndl *service;
419
420 list_for_each_entry(service, &service_table, list) {
421 if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
422 dev_err(&GET_DEV(accel_dev),
423 "Failed to restart service %s.\n",
424 service->name);
425 }
426 return 0;
427}
428
429int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
430{
431 struct service_hndl *service;
432
433 list_for_each_entry(service, &service_table, list) {
434 if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
435 dev_err(&GET_DEV(accel_dev),
436 "Failed to restart service %s.\n",
437 service->name);
438 }
439 return 0;
440}
441
442void adf_error_notifier(struct adf_accel_dev *accel_dev)
443{
444 struct service_hndl *service;
445
446 list_for_each_entry(service, &service_table, list) {
447 if (service->event_hld(accel_dev, ADF_EVENT_FATAL_ERROR))
448 dev_err(&GET_DEV(accel_dev),
449 "Failed to send error event to %s.\n",
450 service->name);
451 }
452}
453
454int adf_dev_down(struct adf_accel_dev *accel_dev)
455{
456 int ret = 0;
457
458 if (!accel_dev)
459 return -EINVAL;
460
461 mutex_lock(&accel_dev->state_lock);
462
463 adf_dev_stop(accel_dev);
464 adf_dev_shutdown(accel_dev);
465
466 mutex_unlock(&accel_dev->state_lock);
467 return ret;
468}
469EXPORT_SYMBOL_GPL(adf_dev_down);
470
471int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
472{
473 int ret = 0;
474
475 if (!accel_dev)
476 return -EINVAL;
477
478 mutex_lock(&accel_dev->state_lock);
479
480 if (adf_dev_started(accel_dev)) {
481 dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
482 accel_dev->accel_id);
483 ret = -EALREADY;
484 goto out;
485 }
486
487 if (config && GET_HW_DATA(accel_dev)->dev_config) {
488 ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
489 if (unlikely(ret))
490 goto out;
491 }
492
493 ret = adf_dev_init(accel_dev);
494 if (unlikely(ret))
495 goto out;
496
497 ret = adf_dev_start(accel_dev);
498
499out:
500 mutex_unlock(&accel_dev->state_lock);
501 return ret;
502}
503EXPORT_SYMBOL_GPL(adf_dev_up);
504
505int adf_dev_restart(struct adf_accel_dev *accel_dev)
506{
507 int ret = 0;
508
509 if (!accel_dev)
510 return -EFAULT;
511
512 adf_dev_down(accel_dev);
513
514 ret = adf_dev_up(accel_dev, false);
515 /* if device is already up return success*/
516 if (ret == -EALREADY)
517 return 0;
518
519 return ret;
520}
521EXPORT_SYMBOL_GPL(adf_dev_restart);