Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * Asynchronous Compression operations
4 *
5 * Copyright (c) 2016, Intel Corporation
6 * Authors: Weigang Li <weigang.li@intel.com>
7 * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
8 */
9#ifndef _CRYPTO_ACOMP_H
10#define _CRYPTO_ACOMP_H
11
12#include <linux/atomic.h>
13#include <linux/args.h>
14#include <linux/compiler_types.h>
15#include <linux/container_of.h>
16#include <linux/crypto.h>
17#include <linux/err.h>
18#include <linux/scatterlist.h>
19#include <linux/slab.h>
20#include <linux/spinlock_types.h>
21#include <linux/types.h>
22
23/* Set this bit if source is virtual address instead of SG list. */
24#define CRYPTO_ACOMP_REQ_SRC_VIRT 0x00000002
25
/* Set this bit if the virtual address source cannot be used for DMA. */
27#define CRYPTO_ACOMP_REQ_SRC_NONDMA 0x00000004
28
29/* Set this bit if destination is virtual address instead of SG list. */
30#define CRYPTO_ACOMP_REQ_DST_VIRT 0x00000008
31
/* Set this bit if the virtual address destination cannot be used for DMA. */
33#define CRYPTO_ACOMP_REQ_DST_NONDMA 0x00000010
34
35/* Private flags that should not be touched by the user. */
36#define CRYPTO_ACOMP_REQ_PRIVATE \
37 (CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | \
38 CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA)
39
40#define CRYPTO_ACOMP_DST_MAX 131072
41
42#define MAX_SYNC_COMP_REQSIZE 0
43
44#define ACOMP_REQUEST_ON_STACK(name, tfm) \
45 char __##name##_req[sizeof(struct acomp_req) + \
46 MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
47 struct acomp_req *name = acomp_request_on_stack_init( \
48 __##name##_req, (tfm))
49
50#define ACOMP_REQUEST_CLONE(name, gfp) \
51 acomp_request_clone(name, sizeof(__##name##_req), gfp)
52
53struct acomp_req;
54struct folio;
55
56struct acomp_req_chain {
57 crypto_completion_t compl;
58 void *data;
59 struct scatterlist ssg;
60 struct scatterlist dsg;
61 union {
62 const u8 *src;
63 struct folio *sfolio;
64 };
65 union {
66 u8 *dst;
67 struct folio *dfolio;
68 };
69 u32 flags;
70};
71
72/**
73 * struct acomp_req - asynchronous (de)compression request
74 *
75 * @base: Common attributes for asynchronous crypto requests
76 * @src: Source scatterlist
77 * @dst: Destination scatterlist
78 * @svirt: Source virtual address
79 * @dvirt: Destination virtual address
80 * @slen: Size of the input buffer
81 * @dlen: Size of the output buffer and number of bytes produced
82 * @chain: Private API code data, do not use
83 * @__ctx: Start of private context data
84 */
85struct acomp_req {
86 struct crypto_async_request base;
87 union {
88 struct scatterlist *src;
89 const u8 *svirt;
90 };
91 union {
92 struct scatterlist *dst;
93 u8 *dvirt;
94 };
95 unsigned int slen;
96 unsigned int dlen;
97
98 struct acomp_req_chain chain;
99
100 void *__ctx[] CRYPTO_MINALIGN_ATTR;
101};
102
103/**
104 * struct crypto_acomp - user-instantiated objects which encapsulate
105 * algorithms and core processing logic
106 *
107 * @compress: Function performs a compress operation
108 * @decompress: Function performs a de-compress operation
109 * @reqsize: Context size for (de)compression requests
110 * @fb: Synchronous fallback tfm
111 * @base: Common crypto API algorithm data structure
112 */
113struct crypto_acomp {
114 int (*compress)(struct acomp_req *req);
115 int (*decompress)(struct acomp_req *req);
116 unsigned int reqsize;
117 struct crypto_tfm base;
118};
119
120#define COMP_ALG_COMMON { \
121 struct crypto_alg base; \
122}
123struct comp_alg_common COMP_ALG_COMMON;
124
125/**
126 * DOC: Asynchronous Compression API
127 *
128 * The Asynchronous Compression API is used with the algorithms of type
129 * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
130 */
131
132/**
133 * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
134 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
135 * compression algorithm e.g. "deflate"
136 * @type: specifies the type of the algorithm
137 * @mask: specifies the mask for the algorithm
138 *
139 * Allocate a handle for a compression algorithm. The returned struct
140 * crypto_acomp is the handle that is required for any subsequent
141 * API invocation for the compression operations.
142 *
143 * Return: allocated handle in case of success; IS_ERR() is true in case
144 * of an error, PTR_ERR() returns the error code.
145 */
146struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
147 u32 mask);
148/**
149 * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
150 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
151 * compression algorithm e.g. "deflate"
152 * @type: specifies the type of the algorithm
153 * @mask: specifies the mask for the algorithm
154 * @node: specifies the NUMA node the ZIP hardware belongs to
155 *
156 * Allocate a handle for a compression algorithm. Drivers should try to use
157 * (de)compressors on the specified NUMA node.
158 * The returned struct crypto_acomp is the handle that is required for any
159 * subsequent API invocation for the compression operations.
160 *
161 * Return: allocated handle in case of success; IS_ERR() is true in case
162 * of an error, PTR_ERR() returns the error code.
163 */
164struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
165 u32 mask, int node);
166
167static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
168{
169 return &tfm->base;
170}
171
172static inline struct comp_alg_common *__crypto_comp_alg_common(
173 struct crypto_alg *alg)
174{
175 return container_of(alg, struct comp_alg_common, base);
176}
177
178static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
179{
180 return container_of(tfm, struct crypto_acomp, base);
181}
182
183static inline struct comp_alg_common *crypto_comp_alg_common(
184 struct crypto_acomp *tfm)
185{
186 return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
187}
188
189static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
190{
191 return tfm->reqsize;
192}
193
194static inline void acomp_request_set_tfm(struct acomp_req *req,
195 struct crypto_acomp *tfm)
196{
197 crypto_request_set_tfm(&req->base, crypto_acomp_tfm(tfm));
198}
199
200static inline bool acomp_is_async(struct crypto_acomp *tfm)
201{
202 return crypto_comp_alg_common(tfm)->base.cra_flags &
203 CRYPTO_ALG_ASYNC;
204}
205
206static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
207{
208 return __crypto_acomp_tfm(req->base.tfm);
209}
210
211/**
212 * crypto_free_acomp() -- free ACOMPRESS tfm handle
213 *
214 * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
215 *
216 * If @tfm is a NULL or error pointer, this function does nothing.
217 */
218static inline void crypto_free_acomp(struct crypto_acomp *tfm)
219{
220 crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
221}
222
223static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
224{
225 type &= ~CRYPTO_ALG_TYPE_MASK;
226 type |= CRYPTO_ALG_TYPE_ACOMPRESS;
227 mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
228
229 return crypto_has_alg(alg_name, type, mask);
230}
231
232static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm)
233{
234 return crypto_tfm_alg_name(crypto_acomp_tfm(tfm));
235}
236
237static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm)
238{
239 return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
240}
241
242/**
243 * acomp_request_alloc_extra() -- allocates asynchronous (de)compression request
244 *
245 * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
246 * @extra: amount of extra memory
247 * @gfp: gfp to pass to kzalloc (defaults to GFP_KERNEL)
248 *
249 * Return: allocated handle in case of success or NULL in case of an error
250 */
251static inline struct acomp_req *acomp_request_alloc_extra_noprof(
252 struct crypto_acomp *tfm, size_t extra, gfp_t gfp)
253{
254 struct acomp_req *req;
255 size_t len;
256
257 len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
258 if (check_add_overflow(len, extra, &len))
259 return NULL;
260
261 req = kzalloc_noprof(len, gfp);
262 if (likely(req))
263 acomp_request_set_tfm(req, tfm);
264 return req;
265}
266#define acomp_request_alloc_noprof(tfm, ...) \
267 CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \
268 tfm, ##__VA_ARGS__)
269#define acomp_request_alloc_noprof_0(tfm) \
270 acomp_request_alloc_noprof_1(tfm, GFP_KERNEL)
271#define acomp_request_alloc_noprof_1(tfm, gfp) \
272 acomp_request_alloc_extra_noprof(tfm, 0, gfp)
273#define acomp_request_alloc(...) alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__))
274
275/**
276 * acomp_request_alloc_extra() -- allocate acomp request with extra memory
277 *
278 * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
279 * @extra: amount of extra memory
280 * @gfp: gfp to pass to kzalloc
281 *
282 * Return: allocated handle in case of success or NULL in case of an error
283 */
284#define acomp_request_alloc_extra(...) alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__))
285
286static inline void *acomp_request_extra(struct acomp_req *req)
287{
288 struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
289 size_t len;
290
291 len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
292 return (void *)((char *)req + len);
293}
294
295static inline bool acomp_req_on_stack(struct acomp_req *req)
296{
297 return crypto_req_on_stack(&req->base);
298}
299
300/**
301 * acomp_request_free() -- zeroize and free asynchronous (de)compression
302 * request as well as the output buffer if allocated
303 * inside the algorithm
304 *
305 * @req: request to free
306 */
307static inline void acomp_request_free(struct acomp_req *req)
308{
309 if (!req || acomp_req_on_stack(req))
310 return;
311 kfree_sensitive(req);
312}
313
314/**
315 * acomp_request_set_callback() -- Sets an asynchronous callback
316 *
317 * Callback will be called when an asynchronous operation on a given
318 * request is finished.
319 *
320 * @req: request that the callback will be set for
321 * @flgs: specify for instance if the operation may backlog
322 * @cmpl: callback which will be called
323 * @data: private data used by the caller
324 */
325static inline void acomp_request_set_callback(struct acomp_req *req,
326 u32 flgs,
327 crypto_completion_t cmpl,
328 void *data)
329{
330 flgs &= ~CRYPTO_ACOMP_REQ_PRIVATE;
331 flgs |= req->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
332 crypto_request_set_callback(&req->base, flgs, cmpl, data);
333}
334
335/**
336 * acomp_request_set_params() -- Sets request parameters
337 *
338 * Sets parameters required by an acomp operation
339 *
340 * @req: asynchronous compress request
341 * @src: pointer to input buffer scatterlist
342 * @dst: pointer to output buffer scatterlist. If this is NULL, the
343 * acomp layer will allocate the output memory
344 * @slen: size of the input buffer
345 * @dlen: size of the output buffer. If dst is NULL, this can be used by
346 * the user to specify the maximum amount of memory to allocate
347 */
348static inline void acomp_request_set_params(struct acomp_req *req,
349 struct scatterlist *src,
350 struct scatterlist *dst,
351 unsigned int slen,
352 unsigned int dlen)
353{
354 req->src = src;
355 req->dst = dst;
356 req->slen = slen;
357 req->dlen = dlen;
358
359 req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
360 CRYPTO_ACOMP_REQ_SRC_NONDMA |
361 CRYPTO_ACOMP_REQ_DST_VIRT |
362 CRYPTO_ACOMP_REQ_DST_NONDMA);
363}
364
365/**
366 * acomp_request_set_src_sg() -- Sets source scatterlist
367 *
368 * Sets source scatterlist required by an acomp operation.
369 *
370 * @req: asynchronous compress request
371 * @src: pointer to input buffer scatterlist
372 * @slen: size of the input buffer
373 */
374static inline void acomp_request_set_src_sg(struct acomp_req *req,
375 struct scatterlist *src,
376 unsigned int slen)
377{
378 req->src = src;
379 req->slen = slen;
380
381 req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
382 req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
383}
384
385/**
386 * acomp_request_set_src_dma() -- Sets DMA source virtual address
387 *
388 * Sets source virtual address required by an acomp operation.
389 * The address must be usable for DMA.
390 *
391 * @req: asynchronous compress request
392 * @src: virtual address pointer to input buffer
393 * @slen: size of the input buffer
394 */
395static inline void acomp_request_set_src_dma(struct acomp_req *req,
396 const u8 *src, unsigned int slen)
397{
398 req->svirt = src;
399 req->slen = slen;
400
401 req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
402 req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
403}
404
405/**
406 * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address
407 *
408 * Sets source virtual address required by an acomp operation.
409 * The address can not be used for DMA.
410 *
411 * @req: asynchronous compress request
412 * @src: virtual address pointer to input buffer
413 * @slen: size of the input buffer
414 */
415static inline void acomp_request_set_src_nondma(struct acomp_req *req,
416 const u8 *src,
417 unsigned int slen)
418{
419 req->svirt = src;
420 req->slen = slen;
421
422 req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
423 req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
424}
425
426/**
427 * acomp_request_set_src_folio() -- Sets source folio
428 *
429 * Sets source folio required by an acomp operation.
430 *
431 * @req: asynchronous compress request
432 * @folio: pointer to input folio
433 * @off: input folio offset
434 * @len: size of the input buffer
435 */
436static inline void acomp_request_set_src_folio(struct acomp_req *req,
437 struct folio *folio, size_t off,
438 unsigned int len)
439{
440 sg_init_table(&req->chain.ssg, 1);
441 sg_set_folio(&req->chain.ssg, folio, len, off);
442 acomp_request_set_src_sg(req, &req->chain.ssg, len);
443}
444
445/**
446 * acomp_request_set_dst_sg() -- Sets destination scatterlist
447 *
448 * Sets destination scatterlist required by an acomp operation.
449 *
450 * @req: asynchronous compress request
451 * @dst: pointer to output buffer scatterlist
452 * @dlen: size of the output buffer
453 */
454static inline void acomp_request_set_dst_sg(struct acomp_req *req,
455 struct scatterlist *dst,
456 unsigned int dlen)
457{
458 req->dst = dst;
459 req->dlen = dlen;
460
461 req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
462 req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
463}
464
465/**
466 * acomp_request_set_dst_dma() -- Sets DMA destination virtual address
467 *
468 * Sets destination virtual address required by an acomp operation.
469 * The address must be usable for DMA.
470 *
471 * @req: asynchronous compress request
472 * @dst: virtual address pointer to output buffer
473 * @dlen: size of the output buffer
474 */
475static inline void acomp_request_set_dst_dma(struct acomp_req *req,
476 u8 *dst, unsigned int dlen)
477{
478 req->dvirt = dst;
479 req->dlen = dlen;
480
481 req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
482 req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
483}
484
485/**
486 * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address
487 *
488 * Sets destination virtual address required by an acomp operation.
489 * The address can not be used for DMA.
490 *
491 * @req: asynchronous compress request
492 * @dst: virtual address pointer to output buffer
493 * @dlen: size of the output buffer
494 */
495static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
496 u8 *dst, unsigned int dlen)
497{
498 req->dvirt = dst;
499 req->dlen = dlen;
500
501 req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
502 req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
503}
504
505/**
506 * acomp_request_set_dst_folio() -- Sets destination folio
507 *
508 * Sets destination folio required by an acomp operation.
509 *
510 * @req: asynchronous compress request
511 * @folio: pointer to input folio
512 * @off: input folio offset
513 * @len: size of the input buffer
514 */
515static inline void acomp_request_set_dst_folio(struct acomp_req *req,
516 struct folio *folio, size_t off,
517 unsigned int len)
518{
519 sg_init_table(&req->chain.dsg, 1);
520 sg_set_folio(&req->chain.dsg, folio, len, off);
521 acomp_request_set_dst_sg(req, &req->chain.dsg, len);
522}
523
524/**
525 * crypto_acomp_compress() -- Invoke asynchronous compress operation
526 *
527 * Function invokes the asynchronous compress operation
528 *
529 * @req: asynchronous compress request
530 *
531 * Return: zero on success; error code in case of error
532 */
533int crypto_acomp_compress(struct acomp_req *req);
534
535/**
536 * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
537 *
538 * Function invokes the asynchronous decompress operation
539 *
540 * @req: asynchronous compress request
541 *
542 * Return: zero on success; error code in case of error
543 */
544int crypto_acomp_decompress(struct acomp_req *req);
545
546static inline struct acomp_req *acomp_request_on_stack_init(
547 char *buf, struct crypto_acomp *tfm)
548{
549 struct acomp_req *req = (void *)buf;
550
551 crypto_stack_request_init(&req->base, crypto_acomp_tfm(tfm));
552 return req;
553}
554
555struct acomp_req *acomp_request_clone(struct acomp_req *req,
556 size_t total, gfp_t gfp);
557
558#endif