Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2019 HiSilicon Limited. */
3#include <linux/acpi.h>
4#include <linux/bitops.h>
5#include <linux/debugfs.h>
6#include <linux/init.h>
7#include <linux/io.h>
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/pci.h>
11#include <linux/pm_runtime.h>
12#include <linux/seq_file.h>
13#include <linux/topology.h>
14#include <linux/uacce.h>
15#include "zip.h"
16
/* Permission for read-only capability-register debugfs files */
#define CAP_FILE_PERMISSION 0444
#define PCI_DEVICE_ID_HUAWEI_ZIP_PF 0xa250

#define HZIP_QUEUE_NUM_V1 4096

/* Core clock-gate / FSM control registers */
#define HZIP_CLOCK_GATE_CTRL 0x301004
#define HZIP_DECOMP_CHECK_ENABLE BIT(16)
#define HZIP_FSM_MAX_CNT 0x301008

/* AXI read/write cache-enable registers (one bit per channel) */
#define HZIP_PORT_ARCA_CHE_0 0x301040
#define HZIP_PORT_ARCA_CHE_1 0x301044
#define HZIP_PORT_AWCA_CHE_0 0x301060
#define HZIP_PORT_AWCA_CHE_1 0x301064
#define HZIP_CACHE_ALL_EN 0xffffffff

/* Upper 32 bits of the AXI user fields for BD/SGL/data transactions */
#define HZIP_BD_RUSER_32_63 0x301110
#define HZIP_SGL_RUSER_32_63 0x30111c
#define HZIP_DATA_RUSER_32_63 0x301128
#define HZIP_DATA_WUSER_32_63 0x301134
#define HZIP_BD_WUSER_32_63 0x301140

#define HZIP_QM_IDEL_STATUS 0x3040e4

/* Per-core DFX (debug/statistics) register windows */
#define HZIP_CORE_DFX_BASE 0x301000
#define HZIP_CORE_DFX_DECOMP_BASE 0x304000
#define HZIP_CORE_DFX_COMP_0 0x302000
#define HZIP_CORE_DFX_COMP_1 0x303000
#define HZIP_CORE_DFX_DECOMP_0 0x304000
#define HZIP_CORE_DFX_DECOMP_1 0x305000
#define HZIP_CORE_DFX_DECOMP_2 0x306000
#define HZIP_CORE_DFX_DECOMP_3 0x307000
#define HZIP_CORE_DFX_DECOMP_4 0x308000
#define HZIP_CORE_DFX_DECOMP_5 0x309000
#define HZIP_CORE_REGS_BASE_LEN 0xB0
#define HZIP_CORE_REGS_DFX_LEN 0x28
#define HZIP_CORE_ADDR_INTRVL 0x1000

/* Hardware error interrupt source/mask/RAS configuration */
#define HZIP_CORE_INT_SOURCE 0x3010A0
#define HZIP_CORE_INT_MASK_REG 0x3010A4
#define HZIP_CORE_INT_SET 0x3010A8
#define HZIP_CORE_INT_STATUS 0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
#define HZIP_CORE_INT_RAS_FE_ENB_MASK 0x0
#define HZIP_OOO_SHUTDOWN_SEL 0x30120C
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
#define HZIP_AXI_ERROR_MASK (BIT(2) | BIT(3))
#define HZIP_SQE_SIZE 128
#define HZIP_PF_DEF_Q_NUM 64
#define HZIP_PF_DEF_Q_BASE 0
#define HZIP_CTX_Q_NUM_DEF 2

/* Read-clear counter control and AXI shutdown bits */
#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
#define HZIP_SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_WR_PORT BIT(11)

/* Per-algorithm fields in the device algorithm capability bitmap */
#define HZIP_ALG_ZLIB_BIT GENMASK(1, 0)
#define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
#define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
#define HZIP_ALG_LZ77_BIT GENMASK(7, 6)
#define HZIP_ALG_LZ4_BIT GENMASK(9, 8)

#define HZIP_BUF_SIZE 22
#define HZIP_SQE_MASK_OFFSET 64
#define HZIP_SQE_MASK_LEN 48

#define HZIP_CNT_CLR_CE_EN BIT(0)
#define HZIP_RO_CNT_CLR_CE_EN BIT(2)
#define HZIP_RD_CNT_CLR_CE_EN (HZIP_CNT_CLR_CE_EN | \
					HZIP_RO_CNT_CLR_CE_EN)

/* SVA prefetch configuration and status polling */
#define HZIP_PREFETCH_CFG 0x3011B0
#define HZIP_SVA_TRANS 0x3011C4
#define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0)))
#define HZIP_SVA_PREFETCH_DISABLE BIT(26)
#define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30))
#define HZIP_SVA_PREFETCH_NUM GENMASK(18, 16)
#define HZIP_SVA_STALL_NUM GENMASK(15, 0)
#define HZIP_SHAPER_RATE_COMPRESS 750
#define HZIP_SHAPER_RATE_DECOMPRESS 140
#define HZIP_DELAY_1_US 1
#define HZIP_POLL_TIMEOUT_US 1000
#define HZIP_WAIT_SVA_READY 500000
#define HZIP_READ_SVA_STATUS_TIMES 3
#define HZIP_WAIT_US_MIN 10
#define HZIP_WAIT_US_MAX 20

/* clock gating */
#define HZIP_PEH_CFG_AUTO_GATE 0x3011A8
#define HZIP_PEH_CFG_AUTO_GATE_EN BIT(0)
#define HZIP_CORE_GATED_EN GENMASK(15, 8)
#define HZIP_CORE_GATED_OOO_EN BIT(29)
#define HZIP_CLOCK_GATED_EN (HZIP_CORE_GATED_EN | \
				     HZIP_CORE_GATED_OOO_EN)

/* zip comp high performance */
#define HZIP_HIGH_PERF_OFFSET 0x301208

#define HZIP_LIT_LEN_EN_OFFSET 0x301204
#define HZIP_LIT_LEN_EN_EN BIT(4)

#define HZIP_MAX_CHANNEL_NUM 3

/* Values accepted by the "perf_mode" module parameter */
enum {
	HZIP_HIGH_COMP_RATE,
	HZIP_HIGH_COMP_PERF,
};
131
/* Driver name used for the debugfs root directory and QM registration */
static const char hisi_zip_name[] = "hisi_zip";
/* Root of this driver's debugfs hierarchy; created at module init */
static struct dentry *hzip_debugfs_root;

/* Maps a hardware error interrupt bit to a human-readable message */
struct hisi_zip_hw_error {
	u32 int_msk;
	const char *msg;
};

/* Names a DFX counter and its byte offset inside struct hisi_zip_dfx */
struct zip_dfx_item {
	const char *name;
	u32 offset;
};
144
/*
 * Algorithm names reported for each capability-bitmap field; an entry is
 * advertised when (bitmap & alg_msk) is non-zero.
 *
 * NOTE(review): "lz77_only" reuses HZIP_ALG_LZ77_BIT, the same mask as
 * "lz77_zstd" — confirm whether a dedicated mask (e.g. GENMASK(11, 10))
 * was intended for lz77_only.
 */
static const struct qm_dev_alg zip_dev_algs[] = { {
		.alg_msk = HZIP_ALG_ZLIB_BIT,
		.alg = "zlib\n",
	}, {
		.alg_msk = HZIP_ALG_GZIP_BIT,
		.alg = "gzip\n",
	}, {
		.alg_msk = HZIP_ALG_DEFLATE_BIT,
		.alg = "deflate\n",
	}, {
		.alg_msk = HZIP_ALG_LZ77_BIT,
		.alg = "lz77_zstd\n",
	}, {
		.alg_msk = HZIP_ALG_LZ77_BIT,
		.alg = "lz77_only\n",
	}, {
		.alg_msk = HZIP_ALG_LZ4_BIT,
		.alg = "lz4\n",
	},
};
165
/* Global list of probed ZIP devices plus crypto (un)registration hooks */
static struct hisi_qm_list zip_devices = {
	.register_to_crypto	= hisi_zip_register_to_crypto,
	.unregister_from_crypto	= hisi_zip_unregister_from_crypto,
};

/* debugfs file name -> offset of the matching atomic64 counter in the dfx struct */
static struct zip_dfx_item zip_dfx_files[] = {
	{"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)},
	{"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)},
};
177
178static const struct hisi_zip_hw_error zip_hw_error[] = {
179 { .int_msk = BIT(0), .msg = "zip_ecc_1bitt_err" },
180 { .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
181 { .int_msk = BIT(2), .msg = "zip_axi_rresp_err" },
182 { .int_msk = BIT(3), .msg = "zip_axi_bresp_err" },
183 { .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" },
184 { .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" },
185 { .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" },
186 { .int_msk = BIT(7), .msg = "zip_pre_in_data_err" },
187 { .int_msk = BIT(8), .msg = "zip_com_inf_err" },
188 { .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
189 { .int_msk = BIT(10), .msg = "zip_pre_out_err" },
190 { .int_msk = BIT(11), .msg = "zip_axi_poison_err" },
191 { .int_msk = BIT(12), .msg = "zip_sva_err" },
192 { /* sentinel */ }
193};
194
/* Indices of the writable control files under debugfs */
enum ctrl_debug_file_index {
	HZIP_CLEAR_ENABLE,
	HZIP_DEBUG_FILE_NUM,
};

/* debugfs file names, indexed by enum ctrl_debug_file_index */
static const char * const ctrl_debug_file_name[] = {
	[HZIP_CLEAR_ENABLE] = "clear_enable",
};

/* Per-file state: which control it is, a lock, and a backref to the ctrl */
struct ctrl_debug_file {
	enum ctrl_debug_file_index index;
	spinlock_t lock;
	struct hisi_zip_ctrl *ctrl;
};

/*
 * One ZIP controller has one PF and multiple VFs, some global configurations
 * which PF has need this structure.
 *
 * Just relevant for PF.
 */
struct hisi_zip_ctrl {
	struct hisi_zip *hisi_zip;
	struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};
220
/* Row indices into zip_basic_cap_info (QM caps first, then device caps) */
enum zip_cap_type {
	ZIP_QM_NFE_MASK_CAP = 0x0,
	ZIP_QM_RESET_MASK_CAP,
	ZIP_QM_OOO_SHUTDOWN_MASK_CAP,
	ZIP_QM_CE_MASK_CAP,
	ZIP_NFE_MASK_CAP,
	ZIP_RESET_MASK_CAP,
	ZIP_OOO_SHUTDOWN_MASK_CAP,
	ZIP_CE_MASK_CAP,
	ZIP_CLUSTER_NUM_CAP,
	ZIP_CORE_TYPE_NUM_CAP,
	ZIP_CORE_NUM_CAP,
	ZIP_CLUSTER_COMP_NUM_CAP,
	ZIP_CLUSTER_DECOMP_NUM_CAP,
	ZIP_DECOMP_ENABLE_BITMAP,
	ZIP_COMP_ENABLE_BITMAP,
	ZIP_DRV_ALG_BITMAP,
	ZIP_DEV_ALG_BITMAP,
	ZIP_CORE1_ALG_BITMAP,
	ZIP_CORE2_ALG_BITMAP,
	ZIP_CORE3_ALG_BITMAP,
	ZIP_CORE4_ALG_BITMAP,
	ZIP_CORE5_ALG_BITMAP,
	ZIP_CAP_MAX
};
246
/*
 * Capability table: {type, register offset, bit shift, field mask, then
 * per-hardware-version default values}. The shift/mask pairs are also used
 * to unpack packed fields read from the hardware capability registers.
 */
static struct hisi_qm_cap_info zip_basic_cap_info[] = {
	{ZIP_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C57, 0x7C77},
	{ZIP_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC57, 0x6C77},
	{ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
	{ZIP_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
	{ZIP_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x1FFE},
	{ZIP_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x7FE},
	{ZIP_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x2, 0x7FE},
	{ZIP_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
	{ZIP_CLUSTER_NUM_CAP, 0x313C, 28, GENMASK(3, 0), 0x1, 0x1, 0x1},
	{ZIP_CORE_TYPE_NUM_CAP, 0x313C, 24, GENMASK(3, 0), 0x2, 0x2, 0x2},
	{ZIP_CORE_NUM_CAP, 0x313C, 16, GENMASK(7, 0), 0x8, 0x8, 0x5},
	{ZIP_CLUSTER_COMP_NUM_CAP, 0x313C, 8, GENMASK(7, 0), 0x2, 0x2, 0x2},
	{ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3},
	{ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C},
	{ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3},
	{ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x0, 0x30},
	{ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0x3F},
	{ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
	{ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
	{ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
	{ZIP_CORE4_ALG_BITMAP, 0x3158, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
	{ZIP_CORE5_ALG_BITMAP, 0x315C, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
	{ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
};

/*
 * Queryable capability table exposed through the "cap_regs" debugfs file:
 * {type, printable name, register offset, per-hardware-version defaults}.
 */
static const struct hisi_qm_cap_query_info zip_cap_query_info[] = {
	{QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE             ", 0x3124, 0x0, 0x1C57, 0x7C77},
	{QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET            ", 0x3128, 0x0, 0xC57, 0x6C77},
	{QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE              ", 0x312C, 0x0, 0x8, 0x8},
	{ZIP_RAS_NFE_TYPE, "ZIP_RAS_NFE_TYPE            ", 0x3130, 0x0, 0x7FE, 0x1FFE},
	{ZIP_RAS_NFE_RESET, "ZIP_RAS_NFE_RESET           ", 0x3134, 0x0, 0x7FE, 0x7FE},
	{ZIP_RAS_CE_TYPE, "ZIP_RAS_CE_TYPE             ", 0x3138, 0x0, 0x1, 0x1},
	{ZIP_CORE_INFO, "ZIP_CORE_INFO               ", 0x313C, 0x12080206, 0x12080206, 0x12050203},
	{ZIP_CORE_EN, "ZIP_CORE_EN                 ", 0x3140, 0xFC0003, 0xFC0003, 0x1C0003},
	{ZIP_DRV_ALG_BITMAP_TB, "ZIP_DRV_ALG_BITMAP          ", 0x3144, 0x0, 0x0, 0x30},
	{ZIP_ALG_BITMAP, "ZIP_ALG_BITMAP              ", 0x3148, 0xF, 0xF, 0x3F},
	{ZIP_CORE1_BITMAP, "ZIP_CORE1_BITMAP            ", 0x314C, 0x5, 0x5, 0xD5},
	{ZIP_CORE2_BITMAP, "ZIP_CORE2_BITMAP            ", 0x3150, 0x5, 0x5, 0xD5},
	{ZIP_CORE3_BITMAP, "ZIP_CORE3_BITMAP            ", 0x3154, 0xA, 0xA, 0x2A},
	{ZIP_CORE4_BITMAP, "ZIP_CORE4_BITMAP            ", 0x3158, 0xA, 0xA, 0x2A},
	{ZIP_CORE5_BITMAP, "ZIP_CORE5_BITMAP            ", 0x315C, 0xA, 0xA, 0x2A},
};
290
/* Per-core DFX registers, offsets relative to the core's DFX window */
static const struct debugfs_reg32 hzip_dfx_regs[] = {
	{"HZIP_GET_BD_NUM                ", 0x00},
	{"HZIP_GET_RIGHT_BD              ", 0x04},
	{"HZIP_GET_ERROR_BD              ", 0x08},
	{"HZIP_DONE_BD_NUM               ", 0x0c},
	{"HZIP_WORK_CYCLE                ", 0x10},
	{"HZIP_IDLE_CYCLE                ", 0x18},
	{"HZIP_MAX_DELAY                 ", 0x20},
	{"HZIP_MIN_DELAY                 ", 0x24},
	{"HZIP_AVG_DELAY                 ", 0x28},
	{"HZIP_MEM_VISIBLE_DATA          ", 0x30},
	{"HZIP_MEM_VISIBLE_ADDR          ", 0x34},
	{"HZIP_CONSUMED_BYTE             ", 0x38},
	{"HZIP_PRODUCED_BYTE             ", 0x40},
	{"HZIP_COMP_INF                  ", 0x70},
	{"HZIP_PRE_OUT                   ", 0x78},
	{"HZIP_BD_RD                     ", 0x7c},
	{"HZIP_BD_WR                     ", 0x80},
	{"HZIP_GET_BD_AXI_ERR_NUM        ", 0x84},
	{"HZIP_GET_BD_PARSE_ERR_NUM      ", 0x88},
	{"HZIP_ADD_BD_AXI_ERR_NUM        ", 0x8c},
	{"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94},
	{"HZIP_DECOMP_LZ77_CURR_ST       ", 0x9c},
};

/* Device-wide control registers, absolute offsets from the BAR base */
static const struct debugfs_reg32 hzip_com_dfx_regs[] = {
	{"HZIP_CLOCK_GATE_CTRL           ", 0x301004},
	{"HZIP_CORE_INT_RAS_CE_ENB       ", 0x301160},
	{"HZIP_CORE_INT_RAS_NFE_ENB      ", 0x301164},
	{"HZIP_CORE_INT_RAS_FE_ENB       ", 0x301168},
	{"HZIP_UNCOM_ERR_RAS_CTRL        ", 0x30116C},
};

/* Subset of per-core DFX registers snapshotted for last-words dumping */
static const struct debugfs_reg32 hzip_dump_dfx_regs[] = {
	{"HZIP_GET_BD_NUM                ", 0x00},
	{"HZIP_GET_RIGHT_BD              ", 0x04},
	{"HZIP_GET_ERROR_BD              ", 0x08},
	{"HZIP_DONE_BD_NUM               ", 0x0c},
	{"HZIP_MAX_DELAY                 ", 0x20},
};
331
/* define the ZIP's dfx regs region and region length */
static struct dfx_diff_registers hzip_diff_regs[] = {
	{
		.reg_offset = HZIP_CORE_DFX_BASE,
		.reg_len = HZIP_CORE_REGS_BASE_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_COMP_0,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_COMP_1,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_0,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_1,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_2,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_3,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_4,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_5,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	},
};
363
/* Human-readable channel names, indexed 0..HZIP_MAX_CHANNEL_NUM-1 */
static const char *zip_channel_name[HZIP_MAX_CHANNEL_NUM] = {
	"COMPRESS",
	"DECOMPRESS",
	"DAE"
};
369
/* debugfs "diff_regs" show: dump the saved-vs-current DFX register deltas */
static int hzip_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(hzip_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hzip_diff_regs);
380
381static int perf_mode_set(const char *val, const struct kernel_param *kp)
382{
383 int ret;
384 u32 n;
385
386 if (!val)
387 return -EINVAL;
388
389 ret = kstrtou32(val, 10, &n);
390 if (ret != 0 || (n != HZIP_HIGH_COMP_PERF &&
391 n != HZIP_HIGH_COMP_RATE))
392 return -EINVAL;
393
394 return param_set_int(val, kp);
395}
396
/* Param ops: validate on write, plain int on read */
static const struct kernel_param_ops zip_com_perf_ops = {
	.set = perf_mode_set,
	.get = param_get_int,
};

/*
 * perf_mode = 0 means enable high compression rate mode,
 * perf_mode = 1 means enable high compression performance mode.
 * These two modes only apply to the compression direction.
 */
static u32 perf_mode = HZIP_HIGH_COMP_RATE;
module_param_cb(perf_mode, &zip_com_perf_ops, &perf_mode, 0444);
MODULE_PARM_DESC(perf_mode, "ZIP high perf mode 0(default), 1(enable)");
410
/* Param ops: shared uacce-mode validator, plain int on read */
static const struct kernel_param_ops zip_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means zip only register to crypto,
 * uacce_mode = 1 means zip both register to crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
423
/* Records whether the user explicitly passed pf_q_num on the command line */
static bool pf_q_num_flag;

/* Setter for "pf_q_num": note the flag, then delegate range validation */
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	pf_q_num_flag = true;

	return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
}

static const struct kernel_param_ops pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");
440
/* Param ops for "vfs_num": shared VF-count validator, plain int on read */
static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

/* PCI IDs this driver binds to: the ZIP PF and its VFs */
static const struct pci_device_id hisi_zip_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
456
457int zip_create_qps(struct hisi_qp **qps, int qp_num, int node, u8 *alg_type)
458{
459 if (node == NUMA_NO_NODE)
460 node = cpu_to_node(raw_smp_processor_id());
461
462 return hisi_qm_alloc_qps_node(&zip_devices, qp_num, alg_type, node, qps);
463}
464
465bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
466{
467 u32 cap_val;
468
469 cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_TB].cap_val;
470 if ((alg & cap_val) == alg)
471 return true;
472
473 return false;
474}
475
/*
 * Configure the literal-length feature for stream-mode compression on
 * HW v3+ by clearing HZIP_LIT_LEN_EN_EN in the control register.
 *
 * NOTE(review): the code clears the bit while the original comment said
 * "enable" — presumably the bit is active-low (0 = enabled); confirm
 * against the hardware register documentation.
 */
static void hisi_zip_literal_set(struct hisi_qm *qm)
{
	u32 val;

	/* feature only exists on HW v3 and later */
	if (qm->ver < QM_HW_V3)
		return;

	val = readl_relaxed(qm->io_base + HZIP_LIT_LEN_EN_OFFSET);
	val &= ~HZIP_LIT_LEN_EN_EN;

	/* enable literal length in stream mode compression */
	writel(val, qm->io_base + HZIP_LIT_LEN_EN_OFFSET);
}
489
490static void hisi_zip_set_high_perf(struct hisi_qm *qm)
491{
492 u32 val;
493
494 val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET);
495 if (perf_mode == HZIP_HIGH_COMP_PERF)
496 val |= HZIP_HIGH_COMP_PERF;
497 else
498 val &= ~HZIP_HIGH_COMP_PERF;
499
500 /* Set perf mode */
501 writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET);
502}
503
/*
 * Poll @offset until the bits in @mask read as zero for three consecutive
 * samples, indicating the SVA module has quiesced.
 *
 * Returns 0 when ready, -ETIMEDOUT after HZIP_WAIT_SVA_READY iterations.
 */
static int hisi_zip_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask)
{
	u32 val, try_times = 0;
	u8 count = 0;

	/*
	 * Read the register value every 10-20us. If the value is 0 for three
	 * consecutive times, the SVA module is ready.
	 */
	do {
		val = readl(qm->io_base + offset);
		if (val & mask)
			count = 0;	/* busy again: restart the streak */
		else if (++count == HZIP_READ_SVA_STATUS_TIMES)
			break;

		usleep_range(HZIP_WAIT_US_MIN, HZIP_WAIT_US_MAX);
	} while (++try_times < HZIP_WAIT_SVA_READY);

	if (try_times == HZIP_WAIT_SVA_READY) {
		pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
		return -ETIMEDOUT;
	}

	return 0;
}
530
/*
 * Disable SVA prefetch: set the disable bit, poll until the hardware
 * acknowledges, then wait for outstanding SVA stalls to drain. No-op on
 * devices without SVA prefetch support.
 */
static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
	val |= HZIP_SVA_PREFETCH_DISABLE;
	writel(val, qm->io_base + HZIP_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
					 val, !(val & HZIP_SVA_DISABLE_READY),
					 HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");

	/* best effort: wait for in-flight SVA transactions to quiesce */
	(void)hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_STALL_NUM);
}
551
/*
 * Enable SVA prefetch: clear the disable bits, poll for the hardware to
 * acknowledge, then wait until the prefetch engine reports ready. On any
 * failure the prefetch is closed again. No-op without SVA support.
 */
static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
	val &= HZIP_PREFETCH_ENABLE;
	writel(val, qm->io_base + HZIP_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
					 val, !(val & HZIP_SVA_PREFETCH_DISABLE),
					 HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "failed to open sva prefetch\n");
		hisi_zip_close_sva_prefetch(qm);
		return;
	}

	/* roll back if the prefetch engine never becomes ready */
	ret = hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_PREFETCH_NUM);
	if (ret)
		hisi_zip_close_sva_prefetch(qm);
}
578
579static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
580{
581 u32 val;
582
583 if (qm->ver < QM_HW_V3)
584 return;
585
586 val = readl(qm->io_base + HZIP_CLOCK_GATE_CTRL);
587 val |= HZIP_CLOCK_GATED_EN;
588 writel(val, qm->io_base + HZIP_CLOCK_GATE_CTRL);
589
590 val = readl(qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
591 val |= HZIP_PEH_CFG_AUTO_GATE_EN;
592 writel(val, qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
593}
594
/*
 * One-shot hardware bring-up for the PF: program QM user-domain and cache
 * settings, AXI user fields, SVA prefetch, core-enable bitmaps, writeback
 * and clock gating, then hand off to the DAE sub-device.
 *
 * Returns 0 on success; on DAE failure the SVA prefetch is closed again
 * and the DAE error code is returned.
 */
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
	void __iomem *base = qm->io_base;
	u32 dcomp_bm, comp_bm;
	u32 zip_core_en;
	int ret;

	/* qm user domain */
	writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME(bus master enable) */
	writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);

	/* cache */
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);

	/* user domain configurations */
	writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);

	/* HW v2 with SVA needs the SSV flag on data/SGL transactions */
	if (qm->use_sva && qm->ver == QM_HW_V2) {
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_SGL_RUSER_32_63);
	} else {
		writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
		writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
		writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
	}
	hisi_zip_open_sva_prefetch(qm);

	/* let's open all compression/decompression cores */

	zip_core_en = qm->cap_tables.dev_cap_table[ZIP_CORE_EN].cap_val;
	dcomp_bm = (zip_core_en >> zip_basic_cap_info[ZIP_DECOMP_ENABLE_BITMAP].shift) &
		    zip_basic_cap_info[ZIP_DECOMP_ENABLE_BITMAP].mask;
	comp_bm = (zip_core_en >> zip_basic_cap_info[ZIP_COMP_ENABLE_BITMAP].shift) &
		   zip_basic_cap_info[ZIP_COMP_ENABLE_BITMAP].mask;
	writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);

	/* enable sqc,cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);

	hisi_zip_set_high_perf(qm);
	hisi_zip_literal_set(qm);
	hisi_zip_enable_clock_gate(qm);

	ret = hisi_dae_set_user_domain(qm);
	if (ret)
		goto close_sva_prefetch;

	return 0;

close_sva_prefetch:
	hisi_zip_close_sva_prefetch(qm);
	return ret;
}
666
667static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
668{
669 u32 val1, val2;
670
671 val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
672 if (enable) {
673 val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
674 val2 = qm->err_info.dev_err.shutdown_mask;
675 } else {
676 val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
677 val2 = 0x0;
678 }
679
680 if (qm->ver > QM_HW_V2)
681 writel(val2, qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
682
683 writel(val1, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
684}
685
/*
 * Arm ZIP hardware error reporting: clear stale error sources, program the
 * CE/NFE/FE RAS routing, enable the OOO shutdown path, unmask the error
 * interrupts, and arm the DAE sub-device. HW v1 has no error handling, so
 * all error interrupts are simply masked there.
 */
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;

	if (qm->ver == QM_HW_V1) {
		writel(HZIP_CORE_INT_MASK_ALL,
		       qm->io_base + HZIP_CORE_INT_MASK_REG);
		dev_info(&qm->pdev->dev, "Does not support hw error handle\n");
		return;
	}

	/* clear ZIP hw error source if having */
	writel(err_mask, qm->io_base + HZIP_CORE_INT_SOURCE);

	/* configure error type */
	writel(dev_err->ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
	writel(dev_err->fe, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
	writel(dev_err->nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	hisi_zip_master_ooo_ctrl(qm, true);

	/* enable ZIP hw error interrupts */
	writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);

	hisi_dae_hw_error_enable(qm);
}
713
714static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
715{
716 struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
717 u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
718
719 /* disable ZIP hw error interrupts */
720 writel(err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
721
722 hisi_zip_master_ooo_ctrl(qm, false);
723
724 hisi_dae_hw_error_disable(qm);
725}
726
727static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
728{
729 struct hisi_zip *hisi_zip = file->ctrl->hisi_zip;
730
731 return &hisi_zip->qm;
732}
733
734static u32 clear_enable_read(struct hisi_qm *qm)
735{
736 return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
737 HZIP_SOFT_CTRL_CNT_CLR_CE_BIT;
738}
739
740static int clear_enable_write(struct hisi_qm *qm, u32 val)
741{
742 u32 tmp;
743
744 if (val != 1 && val != 0)
745 return -EINVAL;
746
747 tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
748 ~HZIP_SOFT_CTRL_CNT_CLR_CE_BIT) | val;
749 writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
750
751 return 0;
752}
753
/*
 * debugfs read handler for the control files: take DFX access (resumes the
 * device if runtime-suspended), read the value for this file's index under
 * the per-file lock, and return it as a decimal string.
 */
static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
					size_t count, loff_t *pos)
{
	struct ctrl_debug_file *file = filp->private_data;
	struct hisi_qm *qm = file_to_qm(file);
	char tbuf[HZIP_BUF_SIZE];
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case HZIP_CLEAR_ENABLE:
		val = clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}
786
/*
 * debugfs write handler for the control files: parse a single unsigned
 * value from userspace, take DFX access, and apply it to the register
 * matching this file's index under the per-file lock.
 *
 * Only writes at offset 0 are honoured; others are treated as EOF.
 */
static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
					 const char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ctrl_debug_file *file = filp->private_data;
	struct hisi_qm *qm = file_to_qm(file);
	char tbuf[HZIP_BUF_SIZE];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HZIP_BUF_SIZE)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HZIP_BUF_SIZE - 1, pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	ret = kstrtoul(tbuf, 0, &val);
	if (ret)
		return ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case HZIP_CLEAR_ENABLE:
		ret = clear_enable_write(qm, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	/* success: report the whole write as consumed */
	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}
835
/* File operations for the per-control debugfs files */
static const struct file_operations ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hisi_zip_ctrl_debug_read,
	.write = hisi_zip_ctrl_debug_write,
};
842
843static int zip_debugfs_atomic64_set(void *data, u64 val)
844{
845 if (val)
846 return -EINVAL;
847
848 atomic64_set((atomic64_t *)data, 0);
849
850 return 0;
851}
852
/* debugfs getter: report the current value of a DFX atomic64 counter */
static int zip_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
			 zip_debugfs_atomic64_set, "%llu\n");
862
/* debugfs "regs" show: dump the regset attached as seq_file private data */
static int hisi_zip_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
871
/*
 * Return the MMIO base of core @core_num's DFX window. Compression cores
 * live at HZIP_CORE_DFX_BASE + (index + 1) * interval (index 0 of that
 * region is the common block); decompression cores follow from
 * HZIP_CORE_DFX_DECOMP_BASE.
 */
static void __iomem *get_zip_core_addr(struct hisi_qm *qm, int core_num)
{
	u8 zip_comp_core_num;
	u32 zip_core_info;

	/* number of compression cores is packed in the ZIP_CORE_INFO cap */
	zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
	zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
			     zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;

	if (core_num < zip_comp_core_num)
		return qm->io_base + HZIP_CORE_DFX_BASE +
			(core_num + 1) * HZIP_CORE_ADDR_INTRVL;

	return qm->io_base + HZIP_CORE_DFX_DECOMP_BASE +
	       (core_num - zip_comp_core_num) * HZIP_CORE_ADDR_INTRVL;
}
888
889static int hisi_zip_core_debug_init(struct hisi_qm *qm)
890{
891 u32 zip_core_num, zip_comp_core_num;
892 struct device *dev = &qm->pdev->dev;
893 struct debugfs_regset32 *regset;
894 u32 zip_core_info;
895 struct dentry *tmp_d;
896 char buf[HZIP_BUF_SIZE];
897 int i;
898
899 zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
900 zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
901 zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
902 zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
903 zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
904
905 for (i = 0; i < zip_core_num; i++) {
906 if (i < zip_comp_core_num)
907 scnprintf(buf, sizeof(buf), "comp_core%d", i);
908 else
909 scnprintf(buf, sizeof(buf), "decomp_core%d",
910 i - zip_comp_core_num);
911
912 regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
913 if (!regset)
914 return -ENOENT;
915
916 regset->regs = hzip_dfx_regs;
917 regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
918 regset->base = get_zip_core_addr(qm, i);
919 regset->dev = dev;
920
921 tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
922 debugfs_create_file("regs", 0444, tmp_d, regset,
923 &hisi_zip_regs_fops);
924 }
925
926 return 0;
927}
928
/*
 * debugfs "cap_regs" show: print every cached QM and device capability
 * entry as "NAME = 0xVALUE" lines.
 */
static int zip_cap_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;
	u32 i, size;

	size = qm->cap_tables.qm_cap_size;
	for (i = 0; i < size; i++)
		seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
			   qm->cap_tables.qm_cap_table[i].cap_val);

	size = qm->cap_tables.dev_cap_size;
	for (i = 0; i < size; i++)
		seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
			   qm->cap_tables.dev_cap_table[i].cap_val);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(zip_cap_regs);
948
/*
 * Create the "zip_dfx" debugfs directory with one writable (reset-only)
 * file per software counter, plus "diff_regs" (PF only) and "cap_regs".
 */
static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs;
	struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
	struct hisi_zip_dfx *dfx = &zip->dfx;
	struct dentry *tmp_dir;
	void *data;
	int i;

	tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root);
	for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) {
		/* each file maps to an atomic64 counter inside the dfx struct */
		data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset);
		debugfs_create_file(zip_dfx_files[i].name,
				    0644, tmp_dir, data,
				    &zip_atomic64_ops);
	}

	/* diff_regs only makes sense on the PF, which owns the snapshots */
	if (qm->fun_type == QM_HW_PF && hzip_regs)
		debugfs_create_file("diff_regs", 0444, tmp_dir,
				    qm, &hzip_diff_regs_fops);

	debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
			    qm->debug.debug_root, qm, &zip_cap_regs_fops);
}
973
974static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
975{
976 struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
977 int i;
978
979 for (i = HZIP_CLEAR_ENABLE; i < HZIP_DEBUG_FILE_NUM; i++) {
980 spin_lock_init(&zip->ctrl->files[i].lock);
981 zip->ctrl->files[i].ctrl = zip->ctrl;
982 zip->ctrl->files[i].index = i;
983
984 debugfs_create_file(ctrl_debug_file_name[i], 0600,
985 qm->debug.debug_root,
986 zip->ctrl->files + i,
987 &ctrl_debug_fops);
988 }
989
990 return hisi_zip_core_debug_init(qm);
991}
992
/*
 * Build the full debugfs hierarchy for this device: diff-regs snapshots,
 * the QM common files, PF control files (PF only), and the DFX counters.
 *
 * Returns 0 on success; on failure the partially created tree and the
 * diff-regs state are torn down again.
 */
static int hisi_zip_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = hisi_qm_regs_debugfs_init(qm, hzip_diff_regs, ARRAY_SIZE(hzip_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init ZIP diff regs!\n");
		return ret;
	}

	qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  hzip_debugfs_root);

	hisi_qm_debug_init(qm);

	/* only the PF owns the control files and per-core reg dumps */
	if (qm->fun_type == QM_HW_PF) {
		ret = hisi_zip_ctrl_debug_init(qm);
		if (ret)
			goto debugfs_remove;
	}

	hisi_zip_dfx_debug_init(qm);

	return 0;

debugfs_remove:
	debugfs_remove_recursive(qm->debug.debug_root);
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
	return ret;
}
1026
/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
	u32 zip_core_info;
	u8 zip_core_num;
	int i, j;

	/* core count is packed in the ZIP_CORE_INFO capability word */
	zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
	zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
			zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;

	/* enable register read_clear bit */
	writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
	/* with read-clear armed, reading each DFX register zeroes it */
	for (i = 0; i < zip_core_num; i++)
		for (j = 0; j < ARRAY_SIZE(hzip_dfx_regs); j++)
			readl(get_zip_core_addr(qm, i) +
			      hzip_dfx_regs[j].offset);

	/* disable register read_clear bit */
	writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}
1050
/*
 * Tear down the per-device debugfs hierarchy created by
 * hisi_zip_debugfs_init(); the PF additionally clears the hardware
 * debug registers so counters restart from zero on the next init.
 */
static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);

	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));

	if (qm->fun_type == QM_HW_PF) {
		hisi_zip_debug_regs_clear(qm);
		qm->debug.curr_qm_qp_num = 0;
	}
}
1062
/*
 * Snapshot the DFX registers so hisi_zip_show_last_dfx_regs() can later
 * report which of them changed (e.g. across a controller reset).
 *
 * debug->last_words layout: com_dfx_regs_num common registers first,
 * then core_dfx_regs_num entries for each ZIP core, in core order.
 * Returns 0 on success, -ENOMEM if the snapshot buffer allocation fails.
 */
static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
{
	int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	void __iomem *io_base;
	u32 zip_core_info;
	u32 zip_core_num;
	int i, j, idx;

	zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
	zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
		       zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;

	debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
				    sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	/* Common (device-wide) DFX registers occupy the first slots. */
	for (i = 0; i < com_dfx_regs_num; i++) {
		io_base = qm->io_base + hzip_com_dfx_regs[i].offset;
		debug->last_words[i] = readl_relaxed(io_base);
	}

	/* Then one contiguous group of dump registers per ZIP core. */
	for (i = 0; i < zip_core_num; i++) {
		io_base = get_zip_core_addr(qm, i);
		for (j = 0; j < core_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
			debug->last_words[idx] = readl_relaxed(
				io_base + hzip_dump_dfx_regs[j].offset);
		}
	}

	return 0;
}
1098
1099static void hisi_zip_show_last_regs_uninit(struct hisi_qm *qm)
1100{
1101 struct qm_debug *debug = &qm->debug;
1102
1103 if (qm->fun_type == QM_HW_VF || !debug->last_words)
1104 return;
1105
1106 kfree(debug->last_words);
1107 debug->last_words = NULL;
1108}
1109
/*
 * Compare the current DFX register values against the snapshot taken by
 * hisi_zip_show_last_regs_init() and log every register that changed.
 * No-op on VFs or when no snapshot exists.
 */
static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
{
	int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
	u32 zip_core_num, zip_comp_core_num;
	struct qm_debug *debug = &qm->debug;
	char buf[HZIP_BUF_SIZE];
	u32 zip_core_info;
	void __iomem *base;
	int i, j, idx;
	u32 val;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* Device-wide registers first, matching the snapshot layout. */
	for (i = 0; i < com_dfx_regs_num; i++) {
		val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset);
		if (debug->last_words[i] != val)
			pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n",
				 hzip_com_dfx_regs[i].name, debug->last_words[i], val);
	}

	zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
	zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
		       zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
	zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
			    zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;

	/* Cores are laid out compress-first, then decompress. */
	for (i = 0; i < zip_core_num; i++) {
		if (i < zip_comp_core_num)
			scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
		else
			scnprintf(buf, sizeof(buf), "Decomp_core-%d",
				  i - zip_comp_core_num);
		base = get_zip_core_addr(qm, i);

		pci_info(qm->pdev, "==>%s:\n", buf);
		/* dump last word for dfx regs during control resetting */
		for (j = 0; j < core_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
			val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset);
			if (debug->last_words[idx] != val)
				pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n",
					 hzip_dump_dfx_regs[j].name,
					 debug->last_words[idx], val);
		}
	}
}
1158
/*
 * Log every hardware error bit set in @err_sts by walking the
 * NULL-msg-terminated zip_hw_error table. For a multi-bit ECC error the
 * affected SRAM number is read from the ECC error-info register and
 * logged as well.
 */
static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hisi_zip_hw_error *err = zip_hw_error;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (err->msg) {
		if (err->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				err->msg, err->int_msk);

			if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) {
				err_val = readl(qm->io_base +
						HZIP_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n",
					((err_val >>
					HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF));
			}
		}
		err++;
	}
}
1181
/* Read the current device hardware error status bits. */
static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}
1186
/* Acknowledge (clear) the given error bits at the interrupt source register. */
static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
}
1191
/*
 * Mask @err_type out of the RAS NFE enable register so the same error
 * is not reported again while the device is being recovered.
 */
static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
	u32 nfe_mask = qm->err_info.dev_err.nfe;

	writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
}
1198
/*
 * Restore the full NFE and CE RAS reporting masks (counterpart of
 * hisi_zip_disable_error_report(), and a guard against firmware having
 * disabled reporting).
 */
static void hisi_zip_enable_error_report(struct hisi_qm *qm)
{
	u32 nfe_mask = qm->err_info.dev_err.nfe;
	u32 ce_mask = qm->err_info.dev_err.ce;

	writel(nfe_mask, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
	writel(ce_mask, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
}
1207
/*
 * Re-open AXI master out-of-order transfers by toggling the AXI shutdown
 * bit off and back on; the clear-then-set sequence is intentional.
 * Also re-opens the DAE side via hisi_dae_open_axi_master_ooo().
 */
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);

	writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE,
	       qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);

	writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
	       qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);

	hisi_dae_open_axi_master_ooo(qm);
}
1222
/*
 * Block AXI master out-of-order transfers by deliberately injecting an
 * ECC multi-bit error after first masking its report, so the injection
 * itself does not raise a new RAS event.
 */
static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
{
	u32 nfe_enb;

	/* Disable ECC Mbit error report. */
	nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
	writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
	       qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	/* Inject zip ECC Mbit error to block master ooo. */
	writel(HZIP_CORE_INT_STATUS_M_ECC,
	       qm->io_base + HZIP_CORE_INT_SET);
}
1236
/*
 * Evaluate ZIP and DAE hardware error state and decide whether a device
 * reset is needed. Errors that are not reset-worthy are cleared and
 * reporting is re-enabled; a reset is requested if either the ZIP or the
 * DAE side demands one.
 */
static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
{
	enum acc_err_result zip_result = ACC_ERR_NONE;
	enum acc_err_result dae_result;
	u32 err_status;

	/* Get device hardware new error status */
	err_status = hisi_zip_get_hw_err_status(qm);
	if (err_status) {
		if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;
		hisi_zip_log_hw_error(qm, err_status);

		if (err_status & qm->err_info.dev_err.reset_mask) {
			/* Disable the same error reporting until device is recovered. */
			hisi_zip_disable_error_report(qm, err_status);
			zip_result = ACC_ERR_NEED_RESET;
		} else {
			hisi_zip_clear_hw_err_status(qm, err_status);
			/* Avoid firmware disable error report, re-enable. */
			hisi_zip_enable_error_report(qm);
		}
	}

	/* The DAE engine is always checked, even if ZIP itself is clean. */
	dae_result = hisi_dae_get_err_result(qm);

	return (zip_result == ACC_ERR_NEED_RESET ||
		dae_result == ACC_ERR_NEED_RESET) ?
		ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
}
1267
1268static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
1269{
1270 u32 err_status;
1271
1272 err_status = hisi_zip_get_hw_err_status(qm);
1273 if (err_status & qm->err_info.dev_err.shutdown_mask)
1274 return true;
1275
1276 return hisi_dae_dev_is_abnormal(qm);
1277}
1278
/* err_ini hook: delegate private-status handling to the DAE engine. */
static int hisi_zip_set_priv_status(struct hisi_qm *qm)
{
	return hisi_dae_close_axi_master_ooo(qm);
}
1283
/*
 * Mask the AXI error bits out of the interrupt mask (keeping all other
 * CE/NFE/FE errors enabled) and, on HW v3+, likewise remove them from
 * the OOO shutdown selection.
 */
static void hisi_zip_disable_axi_error(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
	u32 val;

	/* Interrupt mask is active-high: 1 = masked, hence the inversion. */
	val = ~(err_mask & (~HZIP_AXI_ERROR_MASK));
	writel(val, qm->io_base + HZIP_CORE_INT_MASK_REG);

	if (qm->ver > QM_HW_V2)
		writel(dev_err->shutdown_mask & (~HZIP_AXI_ERROR_MASK),
		       qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
}
1297
/*
 * Re-enable AXI error reporting: clear any pending AXI error sources
 * first, then restore the full interrupt mask and (HW v3+) the complete
 * OOO shutdown selection.
 */
static void hisi_zip_enable_axi_error(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;

	/* clear axi error source */
	writel(HZIP_AXI_ERROR_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);

	writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);

	if (qm->ver > QM_HW_V2)
		writel(dev_err->shutdown_mask, qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
}
1311
/*
 * Populate the QM-level and device-level error masks from the capability
 * registers (version-dependent via qm->cap_ver), plus the MSI write port
 * and the ACPI reset method name used for full device reset.
 */
static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;
	struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
	struct hisi_qm_err_mask *dev_err = &err_info->dev_err;

	qm_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
	qm_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
	qm_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
					  ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
	qm_err->ecc_2bits_mask = QM_ECC_MBIT;
	qm_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
						 ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
	qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
						    ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);

	dev_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
	dev_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
	dev_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
	dev_err->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
	dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
						     ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	dev_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
						  ZIP_RESET_MASK_CAP, qm->cap_ver);

	err_info->msi_wr_port = HZIP_WR_PORT;
	err_info->acpi_rst = "ZRST";
}
1340
/* Error-handling callbacks registered with the QM core for the ZIP PF. */
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
	.hw_init		= hisi_zip_set_user_domain_and_cache,
	.hw_err_enable		= hisi_zip_hw_error_enable,
	.hw_err_disable		= hisi_zip_hw_error_disable,
	.get_dev_hw_err_status	= hisi_zip_get_hw_err_status,
	.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
	.open_axi_master_ooo	= hisi_zip_open_axi_master_ooo,
	.close_axi_master_ooo	= hisi_zip_close_axi_master_ooo,
	.open_sva_prefetch	= hisi_zip_open_sva_prefetch,
	.close_sva_prefetch	= hisi_zip_close_sva_prefetch,
	.show_last_dfx_regs	= hisi_zip_show_last_dfx_regs,
	.err_info_init		= hisi_zip_err_info_init,
	.get_err_result		= hisi_zip_get_err_result,
	.set_priv_status	= hisi_zip_set_priv_status,
	.dev_is_abnormal	= hisi_zip_dev_is_abnormal,
	.disable_axi_error	= hisi_zip_disable_axi_error,
	.enable_axi_error	= hisi_zip_enable_axi_error,
};
1359
/*
 * PF-only probe setup: allocate the ctrl block, program the user domain
 * and caches, enable device error reporting, clear the debug registers
 * and take the initial DFX register snapshot. A failing snapshot init is
 * logged and propagated to the caller.
 */
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;
	struct hisi_zip_ctrl *ctrl;
	int ret;

	/* devm-managed: freed automatically when the device is unbound */
	ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	hisi_zip->ctrl = ctrl;
	ctrl->hisi_zip = hisi_zip;

	ret = hisi_zip_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hisi_qm_dev_err_init(qm);
	hisi_zip_debug_regs_clear(qm);

	ret = hisi_zip_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}
1386
1387static int zip_pre_store_cap_reg(struct hisi_qm *qm)
1388{
1389 struct hisi_qm_cap_record *zip_cap;
1390 struct pci_dev *pdev = qm->pdev;
1391 size_t i, size;
1392
1393 size = ARRAY_SIZE(zip_cap_query_info);
1394 zip_cap = devm_kcalloc(&pdev->dev, size, sizeof(*zip_cap), GFP_KERNEL);
1395 if (!zip_cap)
1396 return -ENOMEM;
1397
1398 for (i = 0; i < size; i++) {
1399 zip_cap[i].type = zip_cap_query_info[i].type;
1400 zip_cap[i].name = zip_cap_query_info[i].name;
1401 zip_cap[i].cap_val = hisi_qm_get_cap_value(qm, zip_cap_query_info,
1402 i, qm->cap_ver);
1403 }
1404
1405 qm->cap_tables.dev_cap_table = zip_cap;
1406 qm->cap_tables.dev_cap_size = size;
1407
1408 return 0;
1409}
1410
1411static void zip_set_channels(struct hisi_qm *qm)
1412{
1413 struct qm_channel *channel_data = &qm->channel_data;
1414 int i;
1415
1416 channel_data->channel_num = HZIP_MAX_CHANNEL_NUM;
1417 for (i = 0; i < HZIP_MAX_CHANNEL_NUM; i++)
1418 channel_data->channel_name[i] = zip_channel_name[i];
1419}
1420
/*
 * Initialize the common QM layer for one ZIP device, then cache its
 * capability registers, register the supported algorithms and set up
 * the channel names. On any failure after hisi_qm_init() the QM is
 * uninitialized again before returning.
 */
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	u64 alg_msk;
	int ret;

	qm->pdev = pdev;
	qm->mode = uacce_mode;
	qm->sqe_size = HZIP_SQE_SIZE;
	qm->dev_name = hisi_zip_name;

	/* PF and VF are distinguished purely by PCI device ID. */
	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_ZIP_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HZIP_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &zip_devices;
		qm->err_ini = &hisi_zip_err_ini;
		if (pf_q_num_flag)
			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * have no way to get qm configure in VM in v1 hardware,
		 * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force
		 * to trigger only one VF in v1 hardware.
		 *
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = HZIP_PF_DEF_Q_NUM;
		qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to init zip qm configures!\n");
		return ret;
	}

	zip_set_channels(qm);
	/* Fetch and save the value of capability registers */
	ret = zip_pre_store_cap_reg(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
		goto err_qm_uninit;
	}

	/* Register only the algorithms this device instance supports. */
	alg_msk = qm->cap_tables.dev_cap_table[ZIP_ALG_BITMAP].cap_val;
	ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
	if (ret) {
		pci_err(qm->pdev, "Failed to set zip algs!\n");
		goto err_qm_uninit;
	}

	ret = hisi_dae_set_alg(qm);
	if (ret)
		goto err_qm_uninit;

	return 0;

err_qm_uninit:
	hisi_qm_uninit(qm);
	return ret;
}
1484
/* Counterpart of hisi_zip_qm_init(); currently a plain QM uninit. */
static void hisi_zip_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}
1489
1490static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
1491{
1492 u32 type_rate = HZIP_SHAPER_RATE_COMPRESS;
1493 struct hisi_qm *qm = &hisi_zip->qm;
1494 int ret;
1495
1496 if (qm->fun_type == QM_HW_PF) {
1497 ret = hisi_zip_pf_probe_init(hisi_zip);
1498 if (ret)
1499 return ret;
1500 /* enable shaper type 0 */
1501 if (qm->ver >= QM_HW_V3) {
1502 type_rate |= QM_SHAPER_ENABLE;
1503
1504 /* ZIP need to enable shaper type 1 */
1505 type_rate |= HZIP_SHAPER_RATE_DECOMPRESS << QM_SHAPER_TYPE1_OFFSET;
1506 qm->type_rate = type_rate;
1507 }
1508 }
1509
1510 return 0;
1511}
1512
/*
 * Undo the PF-only parts of hisi_zip_probe_init(); nothing to do for a
 * VF, which never ran that setup.
 */
static void hisi_zip_probe_uninit(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	hisi_zip_show_last_regs_uninit(qm);
	hisi_zip_close_sva_prefetch(qm);
	hisi_qm_dev_err_uninit(qm);
}
1522
/*
 * PCI probe entry: QM init -> device init -> QM start -> debugfs ->
 * crypto/uacce registration -> optional SR-IOV -> runtime PM.
 * A debugfs failure is logged but tolerated; every other failure
 * unwinds the steps taken so far in reverse order.
 */
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_zip *hisi_zip;
	struct hisi_qm *qm;
	int ret;

	hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
	if (!hisi_zip)
		return -ENOMEM;

	qm = &hisi_zip->qm;

	ret = hisi_zip_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret);
		return ret;
	}

	ret = hisi_zip_probe_init(hisi_zip);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_probe_uninit;

	/* debugfs is best-effort; a failure does not abort the probe */
	ret = hisi_zip_debugfs_init(qm);
	if (ret)
		pci_err(pdev, "failed to init debugfs (%d)!\n", ret);

	hisi_qm_add_list(qm, &zip_devices);
	ret = hisi_qm_alg_register(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF);
	if (ret < 0) {
		pci_err(pdev, "failed to register driver to crypto!\n");
		goto err_qm_del_list;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_qm_alg_unregister;
		}
	}

	/* Optionally bring up VFs right away when requested via module param. */
	if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_qm_alg_unregister;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_qm_alg_unregister:
	hisi_qm_alg_unregister(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF);

err_qm_del_list:
	hisi_qm_del_list(qm, &zip_devices);
	hisi_zip_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

err_probe_uninit:
	hisi_zip_probe_uninit(qm);

err_qm_uninit:
	hisi_zip_qm_uninit(qm);

	return ret;
}
1596
/* PCI remove entry: tear down in (roughly) reverse order of probe. */
static void hisi_zip_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &zip_devices);
	hisi_qm_alg_unregister(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF);
	hisi_qm_del_list(qm, &zip_devices);

	/* VFs must go before the PF is stopped */
	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	hisi_zip_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
	hisi_zip_probe_uninit(qm);
	hisi_zip_qm_uninit(qm);
}
1614
/* Runtime PM is delegated entirely to the common QM implementation. */
static const struct dev_pm_ops hisi_zip_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};
1618
/* PCI AER/reset handlers are delegated to the common QM implementation. */
static const struct pci_error_handlers hisi_zip_err_handler = {
	.error_detected	= hisi_qm_dev_err_detected,
	.slot_reset	= hisi_qm_dev_slot_reset,
	.reset_prepare	= hisi_qm_reset_prepare,
	.reset_done	= hisi_qm_reset_done,
};
1625
/* PCI driver binding for both PF and VF ZIP devices. */
static struct pci_driver hisi_zip_pci_driver = {
	.name			= "hisi_zip",
	.id_table		= hisi_zip_dev_ids,
	.probe			= hisi_zip_probe,
	.remove			= hisi_zip_remove,
	/* SR-IOV configuration only when the kernel supports it */
	.sriov_configure	= IS_ENABLED(CONFIG_PCI_IOV) ?
					hisi_qm_sriov_configure : NULL,
	.err_handler		= &hisi_zip_err_handler,
	.shutdown		= hisi_qm_dev_shutdown,
	.driver.pm		= &hisi_zip_pm_ops,
};
1637
/*
 * Return the ZIP PF pci_driver; exported so other kernel modules can
 * reference this driver (exact consumer not visible in this file).
 */
struct pci_driver *hisi_zip_get_pf_driver(void)
{
	return &hisi_zip_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_zip_get_pf_driver);
1643
1644static void hisi_zip_register_debugfs(void)
1645{
1646 if (!debugfs_initialized())
1647 return;
1648
1649 hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL);
1650}
1651
/* Remove the driver-wide debugfs root (tolerates a root that was never created). */
static void hisi_zip_unregister_debugfs(void)
{
	debugfs_remove_recursive(hzip_debugfs_root);
}
1656
1657static int __init hisi_zip_init(void)
1658{
1659 int ret;
1660
1661 hisi_qm_init_list(&zip_devices);
1662 hisi_zip_register_debugfs();
1663
1664 ret = pci_register_driver(&hisi_zip_pci_driver);
1665 if (ret < 0) {
1666 hisi_zip_unregister_debugfs();
1667 pr_err("Failed to register pci driver.\n");
1668 }
1669
1670 return ret;
1671}
1672
/* Module exit: unregister the PCI driver, then drop the debugfs root. */
static void __exit hisi_zip_exit(void)
{
	pci_unregister_driver(&hisi_zip_pci_driver);
	hisi_zip_unregister_debugfs();
}
1678
1679module_init(hisi_zip_init);
1680module_exit(hisi_zip_exit);
1681
1682MODULE_LICENSE("GPL v2");
1683MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
1684MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator");