crypto: iaa - Remove dst_null support

Remove the unused dst_null support.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
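In brief: the "dst_null" path let a caller submit a request with req->dst == NULL, in which case the driver allocated the destination scatterlist itself and forced the request synchronous. Nothing uses that path, so this commit drops the disable_async parameter from iaa_compress(), the NULL-dst allocation in iaa_comp_acompress(), the whole iaa_comp_adecompress_alloc_dest() helper, and the .dst_free hook. For orientation, here is a condensed sketch of the compress-side pattern being removed, reconstructed from the deleted hunks below (error-path cleanup trimmed, so not a drop-in build):

	/* Removed dst_null fallback: allocate req->dst when the caller passed NULL */
	if (!req->dst) {
		gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			      GFP_KERNEL : GFP_ATOMIC;

		/* incompressible data will always be < 2 * slen */
		req->dlen = 2 * req->slen;
		order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
		req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
		if (!req->dst)
			return -ENOMEM;
		disable_async = true;	/* NULL-dst requests ran synchronously */
	}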
@@ -1136,8 +1136,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
 			struct idxd_wq *wq,
 			dma_addr_t src_addr, unsigned int slen,
 			dma_addr_t dst_addr, unsigned int *dlen,
-			u32 *compression_crc,
-			bool disable_async)
+			u32 *compression_crc)
 {
 	struct iaa_device_compression_mode *active_compression_mode;
 	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1180,7 +1179,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
 	desc->src2_size = sizeof(struct aecs_comp_table_record);
 	desc->completion_addr = idxd_desc->compl_dma;
 
-	if (ctx->use_irq && !disable_async) {
+	if (ctx->use_irq) {
 		desc->flags |= IDXD_OP_FLAG_RCI;
 
 		idxd_desc->crypto.req = req;
@@ -1193,7 +1192,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
 			" src_addr %llx, dst_addr %llx\n", __func__,
 			active_compression_mode->name,
 			src_addr, dst_addr);
-	} else if (ctx->async_mode && !disable_async)
+	} else if (ctx->async_mode)
 		req->base.data = idxd_desc;
 
 	dev_dbg(dev, "%s: compression mode %s,"
@@ -1214,7 +1213,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
 	update_total_comp_calls();
 	update_wq_comp_calls(wq);
 
-	if (ctx->async_mode && !disable_async) {
+	if (ctx->async_mode) {
 		ret = -EINPROGRESS;
 		dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
 		goto out;
@@ -1234,7 +1233,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
 
 	*compression_crc = idxd_desc->iax_completion->crc;
 
-	if (!ctx->async_mode || disable_async)
+	if (!ctx->async_mode)
 		idxd_free_desc(wq, idxd_desc);
 out:
 	return ret;
@@ -1500,13 +1499,11 @@ static int iaa_comp_acompress(struct acomp_req *req)
 	struct iaa_compression_ctx *compression_ctx;
 	struct crypto_tfm *tfm = req->base.tfm;
 	dma_addr_t src_addr, dst_addr;
-	bool disable_async = false;
 	int nr_sgs, cpu, ret = 0;
 	struct iaa_wq *iaa_wq;
 	u32 compression_crc;
 	struct idxd_wq *wq;
 	struct device *dev;
-	int order = -1;
 
 	compression_ctx = crypto_tfm_ctx(tfm);
 
@@ -1536,21 +1533,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
 
 	iaa_wq = idxd_wq_get_private(wq);
 
-	if (!req->dst) {
-		gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-
-		/* incompressible data will always be < 2 * slen */
-		req->dlen = 2 * req->slen;
-		order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
-		req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
-		if (!req->dst) {
-			ret = -ENOMEM;
-			order = -1;
-			goto out;
-		}
-		disable_async = true;
-	}
-
 	dev = &wq->idxd->pdev->dev;
 
 	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
@@ -1580,7 +1562,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
 		req->dst, req->dlen, sg_dma_len(req->dst));
 
 	ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
-			   &req->dlen, &compression_crc, disable_async);
+			   &req->dlen, &compression_crc);
 	if (ret == -EINPROGRESS)
 		return ret;
 
@@ -1611,100 +1593,6 @@ err_map_dst:
 out:
 	iaa_wq_put(wq);
 
-	if (order >= 0)
-		sgl_free_order(req->dst, order);
-
 	return ret;
 }
 
-static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
-{
-	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
-		      GFP_KERNEL : GFP_ATOMIC;
-	struct crypto_tfm *tfm = req->base.tfm;
-	dma_addr_t src_addr, dst_addr;
-	int nr_sgs, cpu, ret = 0;
-	struct iaa_wq *iaa_wq;
-	struct device *dev;
-	struct idxd_wq *wq;
-	int order = -1;
-
-	cpu = get_cpu();
-	wq = wq_table_next_wq(cpu);
-	put_cpu();
-	if (!wq) {
-		pr_debug("no wq configured for cpu=%d\n", cpu);
-		return -ENODEV;
-	}
-
-	ret = iaa_wq_get(wq);
-	if (ret) {
-		pr_debug("no wq available for cpu=%d\n", cpu);
-		return -ENODEV;
-	}
-
-	iaa_wq = idxd_wq_get_private(wq);
-
-	dev = &wq->idxd->pdev->dev;
-
-	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
-	if (nr_sgs <= 0 || nr_sgs > 1) {
-		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
-			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
-			iaa_wq->wq->id, ret);
-		ret = -EIO;
-		goto out;
-	}
-	src_addr = sg_dma_address(req->src);
-	dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
-		" req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
-		req->src, req->slen, sg_dma_len(req->src));
-
-	req->dlen = 4 * req->slen; /* start with ~avg comp rato */
-alloc_dest:
-	order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
-	req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
-	if (!req->dst) {
-		ret = -ENOMEM;
-		order = -1;
-		goto out;
-	}
-
-	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
-	if (nr_sgs <= 0 || nr_sgs > 1) {
-		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
-			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
-			iaa_wq->wq->id, ret);
-		ret = -EIO;
-		goto err_map_dst;
-	}
-
-	dst_addr = sg_dma_address(req->dst);
-	dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
-		" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
-		req->dst, req->dlen, sg_dma_len(req->dst));
-	ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
-			     dst_addr, &req->dlen, true);
-	if (ret == -EOVERFLOW) {
-		dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
-		req->dlen *= 2;
-		if (req->dlen > CRYPTO_ACOMP_DST_MAX)
-			goto err_map_dst;
-		goto alloc_dest;
-	}
-
-	if (ret != 0)
-		dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
-
-	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
-err_map_dst:
-	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
-out:
-	iaa_wq_put(wq);
-
-	if (order >= 0)
-		sgl_free_order(req->dst, order);
-
-	return ret;
-}
-
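(Note: iaa_comp_adecompress_alloc_dest(), deleted above, was the only place the driver grew the destination on -EOVERFLOW, doubling req->dlen and reallocating until the output fit or CRYPTO_ACOMP_DST_MAX was exceeded. With it gone, sizing the destination buffer is entirely the caller's job.)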
@@ -1727,9 +1615,6 @@ static int iaa_comp_adecompress(struct acomp_req *req)
 		return -EINVAL;
 	}
 
-	if (!req->dst)
-		return iaa_comp_adecompress_alloc_dest(req);
-
 	cpu = get_cpu();
 	wq = wq_table_next_wq(cpu);
 	put_cpu();
@@ -1810,19 +1695,10 @@ static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
 	return 0;
 }
 
-static void dst_free(struct scatterlist *sgl)
-{
-	/*
-	 * Called for req->dst = NULL cases but we free elsewhere
-	 * using sgl_free_order().
-	 */
-}
-
 static struct acomp_alg iaa_acomp_fixed_deflate = {
 	.init			= iaa_comp_init_fixed,
 	.compress		= iaa_comp_acompress,
 	.decompress		= iaa_comp_adecompress,
-	.dst_free		= dst_free,
 	.base			= {
 		.cra_name		= "deflate",
 		.cra_driver_name	= "deflate-iaa",
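After this change, every acomp user of "deflate-iaa" must supply its own destination scatterlist before submitting a request. A minimal caller-side sketch, assuming req is an already-allocated acomp_req and the buffer names and sizes are illustrative (the sg and acomp calls are the stock kernel APIs, not something added by this commit):

	struct scatterlist src_sg, dst_sg;
	int ret;

	sg_init_one(&src_sg, src_buf, src_len);
	sg_init_one(&dst_sg, dst_buf, dst_len);	/* req->dst may no longer be NULL */
	acomp_request_set_params(req, &src_sg, &dst_sg, src_len, dst_len);

	ret = crypto_acomp_compress(req);	/* may return -EINPROGRESS when async */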