unlzma: fix a race condition and add some optimizations to improve performance; also make peek_old_byte errors non-fatal

SVN-Revision: 17678
This commit is contained in:
Felix Fietkau 2009-09-22 18:00:28 +00:00
parent 392e45ef09
commit 0754070949
2 changed files with 124 additions and 36 deletions

View file

@ -1,6 +1,6 @@
--- /dev/null --- /dev/null
+++ b/crypto/unlzma.c +++ b/crypto/unlzma.c
@@ -0,0 +1,748 @@ @@ -0,0 +1,772 @@
+/* +/*
+ * LZMA uncompresion module for pcomp + * LZMA uncompresion module for pcomp
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> + * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
@ -50,7 +50,9 @@
+struct unlzma_ctx { +struct unlzma_ctx {
+ struct task_struct *thread; + struct task_struct *thread;
+ wait_queue_head_t next_req; + wait_queue_head_t next_req;
+ wait_queue_head_t req_done;
+ struct mutex mutex; + struct mutex mutex;
+ bool waiting;
+ bool active; + bool active;
+ bool cancel; + bool cancel;
+ +
@ -106,7 +108,9 @@
+unlzma_request_buffer(struct unlzma_ctx *ctx, int *avail) +unlzma_request_buffer(struct unlzma_ctx *ctx, int *avail)
+{ +{
+ do { + do {
+ ctx->waiting = true;
+ mutex_unlock(&ctx->mutex); + mutex_unlock(&ctx->mutex);
+ wake_up(&ctx->req_done);
+ if (wait_event_interruptible(ctx->next_req, + if (wait_event_interruptible(ctx->next_req,
+ unlzma_should_stop(ctx) || (*avail > 0))) + unlzma_should_stop(ctx) || (*avail > 0)))
+ schedule(); + schedule();
@ -213,22 +217,35 @@
+ int i = ctx->n_buffers; + int i = ctx->n_buffers;
+ u32 pos; + u32 pos;
+ +
+ BUG_ON(!ctx->n_buffers); + if (!ctx->n_buffers) {
+ pos = ctx->pos - offs; + printk(KERN_ERR "unlzma/%s: no buffer\n", __func__);
+ if (pos >= ctx->dict_size) { + goto error;
+ pos = (~pos % ctx->dict_size);
+ } + }
+ +
+ pos = ctx->pos - offs;
+ if (unlikely(pos >= ctx->dict_size))
+ pos = ~pos & (ctx->dict_size - 1);
+
+ while (bh->offset > pos) { + while (bh->offset > pos) {
+ bh--; + bh--;
+ i--; + i--;
+ BUG_ON(!i); + if (!i) {
+ printk(KERN_ERR "unlzma/%s: position %d out of range\n", __func__, pos);
+ goto error;
+ }
+ } + }
+ +
+ pos -= bh->offset; + pos -= bh->offset;
+ BUG_ON(pos >= bh->size); + if (pos >= bh->size) {
+ printk(KERN_ERR "unlzma/%s: position %d out of range\n", __func__, pos);
+ goto error;
+ }
+ +
+ return bh->ptr[pos]; + return bh->ptr[pos];
+
+error:
+ ctx->cancel = true;
+ return 0;
+} +}
+ +
+static void +static void
@ -635,8 +652,10 @@
+ if (!ctx->buffers) + if (!ctx->buffers)
+ return -ENOMEM; + return -ENOMEM;
+ +
+ ctx->waiting = false;
+ mutex_init(&ctx->mutex); + mutex_init(&ctx->mutex);
+ init_waitqueue_head(&ctx->next_req); + init_waitqueue_head(&ctx->next_req);
+ init_waitqueue_head(&ctx->req_done);
+ ctx->thread = kthread_run(unlzma_thread, ctx, "unlzma/%d", instance++); + ctx->thread = kthread_run(unlzma_thread, ctx, "unlzma/%d", instance++);
+ if (IS_ERR(ctx->thread)) { + if (IS_ERR(ctx->thread)) {
+ ret = PTR_ERR(ctx->thread); + ret = PTR_ERR(ctx->thread);
@ -649,21 +668,22 @@
+static int +static int
+unlzma_decompress_init(struct crypto_pcomp *tfm) +unlzma_decompress_init(struct crypto_pcomp *tfm)
+{ +{
+ struct unlzma_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+
+ ctx->pos = 0;
+ return 0; + return 0;
+} +}
+ +
+static void +static void
+unlzma_wait_complete(struct unlzma_ctx *ctx, bool finish) +unlzma_wait_complete(struct unlzma_ctx *ctx, bool finish)
+{ +{
+ DEFINE_WAIT(__wait);
+
+ do { + do {
+ mutex_unlock(&ctx->mutex);
+ wake_up(&ctx->next_req); + wake_up(&ctx->next_req);
+ prepare_to_wait(&ctx->req_done, &__wait, TASK_INTERRUPTIBLE);
+ mutex_unlock(&ctx->mutex);
+ schedule(); + schedule();
+ mutex_lock(&ctx->mutex); + mutex_lock(&ctx->mutex);
+ } while (ctx->active && (ctx->avail_in > 0) && (ctx->avail_out > 0)); + } while (!ctx->waiting && ctx->active);
+ finish_wait(&ctx->req_done, &__wait);
+} +}
+ +
+static int +static int
@ -677,6 +697,7 @@
+ goto out; + goto out;
+ +
+ pos = ctx->pos; + pos = ctx->pos;
+ ctx->waiting = false;
+ ctx->next_in = req->next_in; + ctx->next_in = req->next_in;
+ ctx->avail_in = req->avail_in; + ctx->avail_in = req->avail_in;
+ ctx->next_out = req->next_out; + ctx->next_out = req->next_out;
@ -694,6 +715,9 @@
+ +
+out: +out:
+ mutex_unlock(&ctx->mutex); + mutex_unlock(&ctx->mutex);
+ if (ctx->cancel)
+ return -EINVAL;
+
+ return pos; + return pos;
+} +}
+ +

View file

@ -1,6 +1,6 @@
--- /dev/null --- /dev/null
+++ b/crypto/unlzma.c +++ b/crypto/unlzma.c
@@ -0,0 +1,723 @@ @@ -0,0 +1,772 @@
+/* +/*
+ * LZMA uncompresion module for pcomp + * LZMA uncompresion module for pcomp
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> + * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
@ -36,12 +36,12 @@
+#include <linux/kthread.h> +#include <linux/kthread.h>
+ +
+#include <crypto/internal/compress.h> +#include <crypto/internal/compress.h>
+#include <net/netlink.h>
+#include "unlzma.h" +#include "unlzma.h"
+ +
+static int instance = 0; +static int instance = 0;
+ +
+struct unlzma_buffer { +struct unlzma_buffer {
+ struct unlzma_buffer *last;
+ int offset; + int offset;
+ int size; + int size;
+ u8 *ptr; + u8 *ptr;
@ -50,7 +50,9 @@
+struct unlzma_ctx { +struct unlzma_ctx {
+ struct task_struct *thread; + struct task_struct *thread;
+ wait_queue_head_t next_req; + wait_queue_head_t next_req;
+ wait_queue_head_t req_done;
+ struct mutex mutex; + struct mutex mutex;
+ bool waiting;
+ bool active; + bool active;
+ bool cancel; + bool cancel;
+ +
@ -68,8 +70,10 @@
+ /* writer state */ + /* writer state */
+ u8 previous_byte; + u8 previous_byte;
+ ssize_t pos; + ssize_t pos;
+ struct unlzma_buffer *head;
+ int buf_full; + int buf_full;
+ int n_buffers;
+ int buffers_max;
+ struct unlzma_buffer *buffers;
+ +
+ /* cstate */ + /* cstate */
+ int state; + int state;
@ -92,12 +96,11 @@
+{ +{
+ struct unlzma_buffer *bh; + struct unlzma_buffer *bh;
+ +
+ bh = kzalloc(sizeof(struct unlzma_buffer), GFP_KERNEL); + BUG_ON(ctx->n_buffers >= ctx->buffers_max);
+ bh = &ctx->buffers[ctx->n_buffers++];
+ bh->ptr = ctx->next_out; + bh->ptr = ctx->next_out;
+ bh->offset = ctx->pos; + bh->offset = ctx->pos;
+ bh->last = ctx->head;
+ bh->size = ctx->avail_out; + bh->size = ctx->avail_out;
+ ctx->head = bh;
+ ctx->buf_full = 0; + ctx->buf_full = 0;
+} +}
+ +
@ -105,7 +108,9 @@
+unlzma_request_buffer(struct unlzma_ctx *ctx, int *avail) +unlzma_request_buffer(struct unlzma_ctx *ctx, int *avail)
+{ +{
+ do { + do {
+ ctx->waiting = true;
+ mutex_unlock(&ctx->mutex); + mutex_unlock(&ctx->mutex);
+ wake_up(&ctx->req_done);
+ if (wait_event_interruptible(ctx->next_req, + if (wait_event_interruptible(ctx->next_req,
+ unlzma_should_stop(ctx) || (*avail > 0))) + unlzma_should_stop(ctx) || (*avail > 0)))
+ schedule(); + schedule();
@ -208,23 +213,39 @@
+static u8 +static u8
+peek_old_byte(struct unlzma_ctx *ctx, u32 offs) +peek_old_byte(struct unlzma_ctx *ctx, u32 offs)
+{ +{
+ struct unlzma_buffer *bh = ctx->head; + struct unlzma_buffer *bh = &ctx->buffers[ctx->n_buffers - 1];
+ int i = ctx->n_buffers;
+ u32 pos; + u32 pos;
+ +
+ pos = ctx->pos - offs; + if (!ctx->n_buffers) {
+ if (pos >= ctx->dict_size) { + printk(KERN_ERR "unlzma/%s: no buffer\n", __func__);
+ pos = (~pos % ctx->dict_size); + goto error;
+ } + }
+ +
+ pos = ctx->pos - offs;
+ if (unlikely(pos >= ctx->dict_size))
+ pos = ~pos & (ctx->dict_size - 1);
+
+ while (bh->offset > pos) { + while (bh->offset > pos) {
+ bh = bh->last; + bh--;
+ BUG_ON(!bh); + i--;
+ if (!i) {
+ printk(KERN_ERR "unlzma/%s: position %d out of range\n", __func__, pos);
+ goto error;
+ }
+ } + }
+ +
+ pos -= bh->offset; + pos -= bh->offset;
+ BUG_ON(pos >= bh->size); + if (pos >= bh->size) {
+ printk(KERN_ERR "unlzma/%s: position %d out of range\n", __func__, pos);
+ goto error;
+ }
+ +
+ return bh->ptr[pos]; + return bh->ptr[pos];
+
+error:
+ ctx->cancel = true;
+ return 0;
+} +}
+ +
+static void +static void
@ -460,6 +481,7 @@
+ hdr_buf[i] = rc_read(ctx); + hdr_buf[i] = rc_read(ctx);
+ } + }
+ +
+ ctx->n_buffers = 0;
+ ctx->pos = 0; + ctx->pos = 0;
+ get_buffer(ctx); + get_buffer(ctx);
+ ctx->active = true; + ctx->active = true;
@ -554,11 +576,6 @@
+ unlzma_reset_buf(ctx); + unlzma_reset_buf(ctx);
+ ctx->cancel = false; + ctx->cancel = false;
+ ctx->active = false; + ctx->active = false;
+ while (ctx->head) {
+ struct unlzma_buffer *bh = ctx->head;
+ ctx->head = bh->last;
+ kfree(bh);
+ }
+ } while (!kthread_should_stop()); + } while (!kthread_should_stop());
+ mutex_unlock(&ctx->mutex); + mutex_unlock(&ctx->mutex);
+ return 0; + return 0;
@ -598,6 +615,10 @@
+ unlzma_cancel(ctx); + unlzma_cancel(ctx);
+ kthread_stop(ctx->thread); + kthread_stop(ctx->thread);
+ ctx->thread = NULL; + ctx->thread = NULL;
+ if (ctx->buffers)
+ kfree(ctx->buffers);
+ ctx->buffers_max = 0;
+ ctx->buffers = NULL;
+ } + }
+} +}
+ +
@ -605,13 +626,36 @@
+unlzma_decompress_setup(struct crypto_pcomp *tfm, void *p, unsigned int len) +unlzma_decompress_setup(struct crypto_pcomp *tfm, void *p, unsigned int len)
+{ +{
+ struct unlzma_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); + struct unlzma_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+ struct nlattr *tb[UNLZMA_DECOMP_MAX + 1];
+ int ret = 0; + int ret = 0;
+ +
+ if (ctx->thread) + if (ctx->thread)
+ return 0; + return -EINVAL;
+ +
+ if (!p)
+ return -EINVAL;
+
+ ret = nla_parse(tb, UNLZMA_DECOMP_MAX, p, len, NULL);
+ if (!tb[UNLZMA_DECOMP_OUT_BUFFERS])
+ return -EINVAL;
+
+ if (ctx->buffers_max && (ctx->buffers_max <
+ nla_get_u32(tb[UNLZMA_DECOMP_OUT_BUFFERS]))) {
+ kfree(ctx->buffers);
+ ctx->buffers_max = 0;
+ ctx->buffers = NULL;
+ }
+ if (!ctx->buffers) {
+ ctx->buffers_max = nla_get_u32(tb[UNLZMA_DECOMP_OUT_BUFFERS]);
+ ctx->buffers = kzalloc(sizeof(struct unlzma_buffer) * ctx->buffers_max, GFP_KERNEL);
+ }
+ if (!ctx->buffers)
+ return -ENOMEM;
+
+ ctx->waiting = false;
+ mutex_init(&ctx->mutex); + mutex_init(&ctx->mutex);
+ init_waitqueue_head(&ctx->next_req); + init_waitqueue_head(&ctx->next_req);
+ init_waitqueue_head(&ctx->req_done);
+ ctx->thread = kthread_run(unlzma_thread, ctx, "unlzma/%d", instance++); + ctx->thread = kthread_run(unlzma_thread, ctx, "unlzma/%d", instance++);
+ if (IS_ERR(ctx->thread)) { + if (IS_ERR(ctx->thread)) {
+ ret = PTR_ERR(ctx->thread); + ret = PTR_ERR(ctx->thread);
@ -624,21 +668,22 @@
+static int +static int
+unlzma_decompress_init(struct crypto_pcomp *tfm) +unlzma_decompress_init(struct crypto_pcomp *tfm)
+{ +{
+ struct unlzma_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+
+ ctx->pos = 0;
+ return 0; + return 0;
+} +}
+ +
+static void +static void
+unlzma_wait_complete(struct unlzma_ctx *ctx, bool finish) +unlzma_wait_complete(struct unlzma_ctx *ctx, bool finish)
+{ +{
+ DEFINE_WAIT(__wait);
+
+ do { + do {
+ mutex_unlock(&ctx->mutex);
+ wake_up(&ctx->next_req); + wake_up(&ctx->next_req);
+ prepare_to_wait(&ctx->req_done, &__wait, TASK_INTERRUPTIBLE);
+ mutex_unlock(&ctx->mutex);
+ schedule(); + schedule();
+ mutex_lock(&ctx->mutex); + mutex_lock(&ctx->mutex);
+ } while (ctx->active && (ctx->avail_in > 0) && (ctx->avail_out > 0)); + } while (!ctx->waiting && ctx->active);
+ finish_wait(&ctx->req_done, &__wait);
+} +}
+ +
+static int +static int
@ -652,6 +697,7 @@
+ goto out; + goto out;
+ +
+ pos = ctx->pos; + pos = ctx->pos;
+ ctx->waiting = false;
+ ctx->next_in = req->next_in; + ctx->next_in = req->next_in;
+ ctx->avail_in = req->avail_in; + ctx->avail_in = req->avail_in;
+ ctx->next_out = req->next_out; + ctx->next_out = req->next_out;
@ -669,6 +715,9 @@
+ +
+out: +out:
+ mutex_unlock(&ctx->mutex); + mutex_unlock(&ctx->mutex);
+ if (ctx->cancel)
+ return -EINVAL;
+
+ return pos; + return pos;
+} +}
+ +
@ -832,3 +881,18 @@
+#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS) +#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
+ +
+#endif +#endif
--- a/include/crypto/compress.h
+++ b/include/crypto/compress.h
@@ -49,6 +49,12 @@ enum zlib_decomp_params {
#define ZLIB_DECOMP_MAX (__ZLIB_DECOMP_MAX - 1)
+enum unlzma_decomp_params {
+ UNLZMA_DECOMP_OUT_BUFFERS = 1, /* naximum number of output buffers */
+ __UNLZMA_DECOMP_MAX,
+};
+#define UNLZMA_DECOMP_MAX (__UNLZMA_DECOMP_MAX - 1)
+
struct crypto_pcomp {
struct crypto_tfm base;