Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-or-later
2
3#include <linux/kernel.h>
4#include <linux/string.h>
5#include <linux/err.h>
6#include <linux/slab.h>
7#include <linux/wait.h>
8#include <linux/sched.h>
9#include <linux/cpuhotplug.h>
10#include <linux/vmalloc.h>
11#include <linux/sysfs.h>
12
13#include "zcomp.h"
14
15#include "backend_lzo.h"
16#include "backend_lzorle.h"
17#include "backend_lz4.h"
18#include "backend_lz4hc.h"
19#include "backend_zstd.h"
20#include "backend_deflate.h"
21#include "backend_842.h"
22
/*
 * NULL-terminated table of all compiled-in compression backends.
 * Entries appear only when the corresponding CONFIG_ZRAM_BACKEND_*
 * option is enabled; lookup_backend_ops() scans this linearly and
 * relies on the trailing NULL sentinel.
 */
static const struct zcomp_ops *backends[] = {
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO)
	&backend_lzorle,
	&backend_lzo,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4)
	&backend_lz4,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC)
	&backend_lz4hc,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD)
	&backend_zstd,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE)
	&backend_deflate,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842)
	&backend_842,
#endif
	NULL
};
45
46static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
47{
48 comp->ops->destroy_ctx(&zstrm->ctx);
49 vfree(zstrm->local_copy);
50 vfree(zstrm->buffer);
51 zstrm->buffer = NULL;
52}
53
54static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
55{
56 int ret;
57
58 ret = comp->ops->create_ctx(comp->params, &zstrm->ctx);
59 if (ret)
60 return ret;
61
62 zstrm->local_copy = vzalloc(PAGE_SIZE);
63 /*
64 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
65 * case when compressed size is larger than the original one
66 */
67 zstrm->buffer = vzalloc(2 * PAGE_SIZE);
68 if (!zstrm->buffer || !zstrm->local_copy) {
69 zcomp_strm_free(comp, zstrm);
70 return -ENOMEM;
71 }
72 return 0;
73}
74
75static const struct zcomp_ops *lookup_backend_ops(const char *comp)
76{
77 int i = 0;
78
79 while (backends[i]) {
80 if (sysfs_streq(comp, backends[i]->name))
81 break;
82 i++;
83 }
84 return backends[i];
85}
86
87const char *zcomp_lookup_backend_name(const char *comp)
88{
89 const struct zcomp_ops *backend = lookup_backend_ops(comp);
90
91 if (backend)
92 return backend->name;
93
94 return NULL;
95}
96
97/* show available compressors */
98ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at)
99{
100 int i;
101
102 for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
103 if (!strcmp(comp, backends[i]->name)) {
104 at += sysfs_emit_at(buf, at, "[%s] ",
105 backends[i]->name);
106 } else {
107 at += sysfs_emit_at(buf, at, "%s ", backends[i]->name);
108 }
109 }
110
111 at += sysfs_emit_at(buf, at, "\n");
112 return at;
113}
114
/*
 * Acquire the current CPU's compression stream with its lock held.
 * Must be paired with zcomp_stream_put(). May sleep on the mutex, so
 * callers must be in sleepable context.
 */
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
	for (;;) {
		struct zcomp_strm *zstrm = raw_cpu_ptr(comp->stream);

		/*
		 * Inspired by zswap
		 *
		 * stream is returned with ->mutex locked which prevents
		 * cpu_dead() from releasing this stream under us, however
		 * there is still a race window between raw_cpu_ptr() and
		 * mutex_lock(), during which we could have been migrated
		 * from a CPU that has already destroyed its stream. If
		 * so then unlock and re-try on the current CPU.
		 */
		mutex_lock(&zstrm->lock);
		/* ->buffer is cleared by zcomp_strm_free() under this lock */
		if (likely(zstrm->buffer))
			return zstrm;
		mutex_unlock(&zstrm->lock);
	}
}
136
/* Release a stream previously acquired by zcomp_stream_get(). */
void zcomp_stream_put(struct zcomp_strm *zstrm)
{
	mutex_unlock(&zstrm->lock);
}
141
142int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
143 const void *src, unsigned int *dst_len)
144{
145 struct zcomp_req req = {
146 .src = src,
147 .dst = zstrm->buffer,
148 .src_len = PAGE_SIZE,
149 .dst_len = 2 * PAGE_SIZE,
150 };
151 int ret;
152
153 might_sleep();
154 ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
155 if (!ret)
156 *dst_len = req.dst_len;
157 return ret;
158}
159
160int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
161 const void *src, unsigned int src_len, void *dst)
162{
163 struct zcomp_req req = {
164 .src = src,
165 .dst = dst,
166 .src_len = src_len,
167 .dst_len = PAGE_SIZE,
168 };
169
170 might_sleep();
171 return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
172}
173
174int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
175{
176 struct zcomp *comp = hlist_entry(node, struct zcomp, node);
177 struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
178 int ret;
179
180 ret = zcomp_strm_init(comp, zstrm);
181 if (ret)
182 pr_err("Can't allocate a compression stream\n");
183 return ret;
184}
185
/*
 * CPU-hotplug teardown callback: free @cpu's stream. The stream mutex
 * is taken so the buffers are never freed out from under a concurrent
 * zcomp_stream_get(), which re-checks ->buffer under the same lock.
 */
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);

	mutex_lock(&zstrm->lock);
	zcomp_strm_free(comp, zstrm);
	mutex_unlock(&zstrm->lock);
	return 0;
}
196
/*
 * Initialize a zcomp: allocate per-CPU stream slots, set up backend
 * parameters, and register with CPU hotplug (which allocates a stream
 * for each online CPU via zcomp_cpu_up_prepare()). Returns 0 or a
 * negative errno; on failure everything acquired here is released.
 */
static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
{
	int ret, cpu;

	comp->stream = alloc_percpu(struct zcomp_strm);
	if (!comp->stream)
		return -ENOMEM;

	comp->params = params;
	ret = comp->ops->setup_params(comp->params);
	if (ret)
		goto cleanup;

	/*
	 * Initialize every possible CPU's lock up front; the stream
	 * buffers themselves are allocated by the hotplug callback.
	 */
	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu_ptr(comp->stream, cpu)->lock);

	ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	if (ret < 0)
		goto cleanup;

	return 0;

cleanup:
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	return ret;
}
224
/*
 * Tear down a zcomp created by zcomp_create(): unregister the hotplug
 * instance, release backend parameters, then free the per-CPU streams
 * and the zcomp object itself.
 */
void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	kfree(comp);
}
232
233struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params)
234{
235 struct zcomp *comp;
236 int error;
237
238 /*
239 * The backends array has a sentinel NULL value, so the minimum
240 * size is 1. In order to be valid the array, apart from the
241 * sentinel NULL element, should have at least one compression
242 * backend selected.
243 */
244 BUILD_BUG_ON(ARRAY_SIZE(backends) <= 1);
245
246 comp = kzalloc_obj(struct zcomp);
247 if (!comp)
248 return ERR_PTR(-ENOMEM);
249
250 comp->ops = lookup_backend_ops(alg);
251 if (!comp->ops) {
252 kfree(comp);
253 return ERR_PTR(-EINVAL);
254 }
255
256 error = zcomp_init(comp, params);
257 if (error) {
258 kfree(comp);
259 return ERR_PTR(error);
260 }
261 return comp;
262}