author     Linus Torvalds <torvalds@linux-foundation.org>   2023-07-01 06:27:13 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-07-01 06:27:13 +0200
commit     5d95ff84e62be914b4a4dabfa814e4096b05b1b0
tree       f2d79d562971025b29deab50bb06e3b865f185b3   /drivers/crypto/marvell
parent     xtensa: fix NOMMU build with lock_mm_and_find_vma() conversion
parent     crypto: akcipher - Do not copy dst if it is NULL

Merge tag 'v6.5-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- Add linear akcipher/sig API
- Add tfm cloning (hmac, cmac)
- Add statesize to crypto_ahash
Algorithms:
- Allow only odd e and restrict value in FIPS mode for RSA
- Replace LFSR with SHA3-256 in jitter
- Add interface for gathering of raw entropy in jitter
Drivers:
- Fix race on data_avail and actual data in hwrng/virtio
- Add hash and HMAC support in starfive
- Add RSA algo support in starfive
- Add support for PCI device 0x156E in ccp"
* tag 'v6.5-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (85 commits)
crypto: akcipher - Do not copy dst if it is NULL
crypto: sig - Fix verify call
crypto: akcipher - Set request tfm on sync path
crypto: sm2 - Provide sm2_compute_z_digest when sm2 is disabled
hwrng: imx-rngc - switch to DEFINE_SIMPLE_DEV_PM_OPS
hwrng: st - keep clock enabled while hwrng is registered
hwrng: st - support compile-testing
hwrng: imx-rngc - fix the timeout for init and self check
KEYS: asymmetric: Use new crypto interface without scatterlists
KEYS: asymmetric: Move sm2 code into x509_public_key
KEYS: Add forward declaration in asymmetric-parser.h
crypto: sig - Add interface for sign/verify
crypto: akcipher - Add sync interface without SG lists
crypto: cipher - On clone do crypto_mod_get()
crypto: api - Add __crypto_alloc_tfmgfp
crypto: api - Remove crypto_init_ops()
crypto: rsa - allow only odd e and restrict value in FIPS mode
crypto: geniv - Split geniv out of AEAD Kconfig option
crypto: algboss - Add missing dependency on RNG2
crypto: starfive - Add RSA algo support
...
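The API items called out above — a linear akcipher/sig interface that works on plain buffers instead of scatterlists — are what the KEYS/asymmetric commits in this shortlog migrate to. As a rough, hedged sketch only (the function names and the algorithm string below are assumed from include/crypto/sig.h as introduced by this series, not taken from the Marvell diff that follows), a signature verification through the new interface might look like:

```c
#include <crypto/sig.h>
#include <linux/err.h>

/*
 * Hedged sketch: illustrates the kind of linear (non-scatterlist)
 * verification call flow the new sig API is meant to enable for the
 * asymmetric-keys code.  The exact helper names and the "pkcs1pad(...)"
 * algorithm string are assumptions based on this series, not copied
 * from the diff below.
 */
static int example_rsa_pkcs1_verify(const void *key, unsigned int keylen,
                                    const void *sig, unsigned int siglen,
                                    const void *digest, unsigned int digestlen)
{
        struct crypto_sig *tfm;
        int ret;

        tfm = crypto_alloc_sig("pkcs1pad(rsa,sha256)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_sig_set_pubkey(tfm, key, keylen);
        if (!ret)
                /* Signature and digest are plain kernel pointers, no SG lists. */
                ret = crypto_sig_verify(tfm, sig, siglen, digest, digestlen);

        crypto_free_sig(tfm);
        return ret;
}
```

The point of the linear interface is that callers such as the x509/asymmetric key code no longer have to wrap small, already-linear buffers in single-entry scatterlists just to talk to the crypto API.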
Diffstat (limited to 'drivers/crypto/marvell')

| File | Lines changed |
|---|---|
| drivers/crypto/marvell/cesa/cipher.c | 2 |
| drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 15 |
| drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c | 3 |
| drivers/crypto/marvell/octeontx2/otx2_cptlf.c | 34 |
| drivers/crypto/marvell/octeontx2/otx2_cptlf.h | 33 |
| drivers/crypto/marvell/octeontx2/otx2_cptpf.h | 7 |
| drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c | 41 |
| drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c | 247 |
| drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 10 |
| drivers/crypto/marvell/octeontx2/otx2_cptvf.h | 1 |
| drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c | 8 |

11 files changed, 360 insertions, 41 deletions
```diff
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index c6f2fa753b7c..0f37dfd42d85 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -297,7 +297,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
 static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
                                    const u8 *key, unsigned int len)
 {
-        struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
+        struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
         int err;
 
         err = verify_skcipher_des3_key(cipher, key);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index 6019066a6451..46b778bbbee4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -40,11 +40,26 @@ enum otx2_cpt_eng_type {
 };
 
 /* Take mbox id from end of CPT mbox range in AF (range 0xA00 - 0xBFF) */
+#define MBOX_MSG_RX_INLINE_IPSEC_LF_CFG 0xBFE
 #define MBOX_MSG_GET_ENG_GRP_NUM 0xBFF
 #define MBOX_MSG_GET_CAPS 0xBFD
 #define MBOX_MSG_GET_KVF_LIMITS 0xBFC
 
 /*
+ * Message request to config cpt lf for inline inbound ipsec.
+ * This message is only used between CPT PF <-> CPT VF
+ */
+struct otx2_cpt_rx_inline_lf_cfg {
+        struct mbox_msghdr hdr;
+        u16 sso_pf_func;
+        u16 param1;
+        u16 param2;
+        u16 opcode;
+        u32 credit;
+        u32 reserved;
+};
+
+/*
  * Message request and response to get engine group number
  * which has attached a given type of engines (SE, AE, IE)
  * This messages are only used between CPT PF <=> CPT VF
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 115997475beb..273ee5352a50 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -141,6 +141,8 @@ int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
         req->hdr.sig = OTX2_MBOX_REQ_SIG;
         req->hdr.pcifunc = 0;
         req->cptlfs = lfs->lfs_num;
+        req->cpt_blkaddr = lfs->blkaddr;
+        req->modify = 1;
         ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
         if (ret)
                 return ret;
@@ -168,6 +170,7 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
         req->hdr.id = MBOX_MSG_DETACH_RESOURCES;
         req->hdr.sig = OTX2_MBOX_REQ_SIG;
         req->hdr.pcifunc = 0;
+        req->cptlfs = 1;
         ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
         if (ret)
                 return ret;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index 71e5f79431af..6edd27ff8c4e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -13,10 +13,10 @@ static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
 {
         union otx2_cptx_lf_done_wait done_wait;
 
-        done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
-                                      OTX2_CPT_LF_DONE_WAIT);
+        done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
+                                      lf->slot, OTX2_CPT_LF_DONE_WAIT);
         done_wait.s.time_wait = time_wait;
-        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+        otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                          OTX2_CPT_LF_DONE_WAIT, done_wait.u);
 }
 
@@ -24,10 +24,10 @@ static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
 {
         union otx2_cptx_lf_done_wait done_wait;
 
-        done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
-                                      OTX2_CPT_LF_DONE_WAIT);
+        done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
+                                      lf->slot, OTX2_CPT_LF_DONE_WAIT);
         done_wait.s.num_wait = num_wait;
-        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+        otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                          OTX2_CPT_LF_DONE_WAIT, done_wait.u);
 }
 
@@ -147,7 +147,7 @@ static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
         irq_misc.s.nwrp = 0x1;
 
         for (slot = 0; slot < lfs->lfs_num; slot++)
-                otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, reg,
+                otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg,
                                  irq_misc.u);
 }
 
@@ -157,7 +157,7 @@ static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
 
         /* Enable done interrupts */
         for (slot = 0; slot < lfs->lfs_num; slot++)
-                otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+                otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
                                  OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
         /* Enable Misc interrupts */
         cptlf_set_misc_intrs(lfs, true);
@@ -168,7 +168,7 @@ static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
         int slot;
 
         for (slot = 0; slot < lfs->lfs_num; slot++)
-                otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+                otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
                                  OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
         cptlf_set_misc_intrs(lfs, false);
 }
@@ -177,7 +177,7 @@ static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
 {
         union otx2_cptx_lf_done irq_cnt;
 
-        irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+        irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                                     OTX2_CPT_LF_DONE);
         return irq_cnt.s.done;
 }
@@ -189,8 +189,8 @@ static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
         struct device *dev;
 
         dev = &lf->lfs->pdev->dev;
-        irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
-                                     OTX2_CPT_LF_MISC_INT);
+        irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
+                                     lf->slot, OTX2_CPT_LF_MISC_INT);
         irq_misc_ack.u = 0x0;
 
         if (irq_misc.s.fault) {
@@ -222,7 +222,7 @@ static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
         }
 
         /* Acknowledge interrupts */
-        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+        otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                          OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);
 
         return IRQ_HANDLED;
@@ -237,13 +237,13 @@ static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
         /* Read the number of completed requests */
         irq_cnt = cptlf_read_done_cnt(lf);
         if (irq_cnt) {
-                done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
+                done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
                                               lf->slot, OTX2_CPT_LF_DONE_WAIT);
                 /* Acknowledge the number of completed requests */
-                otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+                otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                                  OTX2_CPT_LF_DONE_ACK, irq_cnt);
-                otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+                otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                                  OTX2_CPT_LF_DONE_WAIT, done_wait.u);
 
                 if (unlikely(!lf->wqe)) {
                         dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
@@ -393,7 +393,7 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
                                                  OTX2_CPT_LMT_LF_LMTLINEX(0));
                 lfs->lf[slot].ioreg = lfs->reg_base +
-                                      OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
+                                      OTX2_CPT_RVU_FUNC_ADDR_S(lfs->blkaddr, slot,
                                                                OTX2_CPT_LF_NQX(0));
         }
 
         /* Send request to attach LFs */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index 4fcaf61a70e3..5302fe3d0e6f 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -180,7 +180,7 @@ static inline void otx2_cptlf_set_iqueues_base_addr(
 
         for (slot = 0; slot < lfs->lfs_num; slot++) {
                 lf_q_base.u = lfs->lf[slot].iqueue.dma_addr;
-                otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+                otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
                                  OTX2_CPT_LF_Q_BASE, lf_q_base.u);
         }
 }
@@ -191,7 +191,7 @@ static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
         lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40 +
                                  OTX2_CPT_EXTRA_SIZE_DIV40;
 
-        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+        otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                          OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
 }
 
@@ -207,15 +207,16 @@ static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
 {
         union otx2_cptx_lf_ctl lf_ctl = { .u = 0x0 };
         union otx2_cptx_lf_inprog lf_inprog;
+        u8 blkaddr = lf->lfs->blkaddr;
         int timeout = 20;
 
         /* Disable instructions enqueuing */
-        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+        otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
                          OTX2_CPT_LF_CTL, lf_ctl.u);
 
         /* Wait for instruction queue to become empty */
         do {
-                lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
+                lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr,
                                               lf->slot, OTX2_CPT_LF_INPROG);
                 if (!lf_inprog.s.inflight)
                         break;
@@ -234,7 +235,7 @@ static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
          * the queue should be empty at this point
          */
         lf_inprog.s.eena = 0x0;
-        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+        otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
                          OTX2_CPT_LF_INPROG, lf_inprog.u);
 }
 
@@ -249,14 +250,15 @@ static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs)
 static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf,
                                              bool enable)
 {
+        u8 blkaddr = lf->lfs->blkaddr;
         union otx2_cptx_lf_ctl lf_ctl;
 
-        lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+        lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
                                    OTX2_CPT_LF_CTL);
         /* Set iqueue's enqueuing */
         lf_ctl.s.ena = enable ? 0x1 : 0x0;
-        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+        otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
                          OTX2_CPT_LF_CTL, lf_ctl.u);
 }
 
@@ -269,13 +271,14 @@ static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf,
                                               bool enable)
 {
         union otx2_cptx_lf_inprog lf_inprog;
+        u8 blkaddr = lf->lfs->blkaddr;
 
-        lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+        lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
                                       OTX2_CPT_LF_INPROG);
         /* Set iqueue's execution */
         lf_inprog.s.eena = enable ? 0x1 : 0x0;
-        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+        otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
                          OTX2_CPT_LF_INPROG, lf_inprog.u);
 }
 
@@ -364,6 +367,18 @@ static inline bool otx2_cptlf_started(struct otx2_cptlfs_info *lfs)
         return atomic_read(&lfs->state) == OTX2_CPTLF_STARTED;
 }
 
+static inline void otx2_cptlf_set_dev_info(struct otx2_cptlfs_info *lfs,
+                                           struct pci_dev *pdev,
+                                           void __iomem *reg_base,
+                                           struct otx2_mbox *mbox,
+                                           int blkaddr)
+{
+        lfs->pdev = pdev;
+        lfs->reg_base = reg_base;
+        lfs->mbox = mbox;
+        lfs->blkaddr = blkaddr;
+}
+
 int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
                     int lfs_num);
 void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index 936174b012e8..a209ec5af381 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -31,6 +31,7 @@ struct otx2_cptpf_dev {
         struct otx2_cptvf_info vf[OTX2_CPT_MAX_VFS_NUM];
         struct otx2_cpt_eng_grps eng_grps;/* Engine groups information */
         struct otx2_cptlfs_info lfs;      /* CPT LFs attached to this PF */
+        struct otx2_cptlfs_info cpt1_lfs; /* CPT1 LFs attached to this PF */
         /* HW capabilities for each engine type */
         union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
         bool is_eng_caps_discovered;
@@ -40,6 +41,9 @@ struct otx2_cptpf_dev {
         struct work_struct afpf_mbox_work;
         struct workqueue_struct *afpf_mbox_wq;
 
+        struct otx2_mbox afpf_mbox_up;
+        struct work_struct afpf_mbox_up_work;
+
         /* VF <=> PF mbox */
         struct otx2_mbox vfpf_mbox;
         struct workqueue_struct *vfpf_mbox_wq;
@@ -52,8 +56,10 @@ struct otx2_cptpf_dev {
         u8 pf_id;               /* RVU PF number */
         u8 max_vfs;             /* Maximum number of VFs supported by CPT */
         u8 enabled_vfs;         /* Number of enabled VFs */
+        u8 sso_pf_func_ovrd;    /* SSO PF_FUNC override bit */
         u8 kvf_limits;          /* Kernel crypto limits */
         bool has_cpt1;
+        u8 rsrc_req_blkaddr;
 
         /* Devlink */
         struct devlink *dl;
@@ -61,6 +67,7 @@ struct otx2_cptpf_dev {
 
 irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
 void otx2_cptpf_afpf_mbox_handler(struct work_struct *work);
+void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work);
 irqreturn_t otx2_cptpf_vfpf_mbox_intr(int irq, void *arg);
 void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work);
 
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 30e6acfc93d9..e34223daa327 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -13,6 +13,8 @@
 #define OTX2_CPT_DRV_NAME "rvu_cptpf"
 #define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"
 
+#define CPT_UC_RID_CN9K_B0 1
+
 static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
                                         int num_vfs)
 {
@@ -473,10 +475,19 @@ static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
         if (err)
                 goto error;
 
+        err = otx2_mbox_init(&cptpf->afpf_mbox_up, cptpf->afpf_mbox_base,
+                             pdev, cptpf->reg_base, MBOX_DIR_PFAF_UP, 1);
+        if (err)
+                goto mbox_cleanup;
+
         INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
+        INIT_WORK(&cptpf->afpf_mbox_up_work, otx2_cptpf_afpf_mbox_up_handler);
         mutex_init(&cptpf->lock);
+
         return 0;
 
+mbox_cleanup:
+        otx2_mbox_destroy(&cptpf->afpf_mbox);
 error:
         destroy_workqueue(cptpf->afpf_mbox_wq);
         return err;
@@ -486,6 +497,33 @@ static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
 {
         destroy_workqueue(cptpf->afpf_mbox_wq);
         otx2_mbox_destroy(&cptpf->afpf_mbox);
+        otx2_mbox_destroy(&cptpf->afpf_mbox_up);
+}
+
+static ssize_t sso_pf_func_ovrd_show(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+        struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+
+        return sprintf(buf, "%d\n", cptpf->sso_pf_func_ovrd);
+}
+
+static ssize_t sso_pf_func_ovrd_store(struct device *dev,
+                                      struct device_attribute *attr,
+                                      const char *buf, size_t count)
+{
+        struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+        u8 sso_pf_func_ovrd;
+
+        if (!(cptpf->pdev->revision == CPT_UC_RID_CN9K_B0))
+                return count;
+
+        if (kstrtou8(buf, 0, &sso_pf_func_ovrd))
+                return -EINVAL;
+
+        cptpf->sso_pf_func_ovrd = sso_pf_func_ovrd;
+
+        return count;
 }
 
 static ssize_t kvf_limits_show(struct device *dev,
@@ -518,8 +556,11 @@ static ssize_t kvf_limits_store(struct device *dev,
 }
 
 static DEVICE_ATTR_RW(kvf_limits);
+static DEVICE_ATTR_RW(sso_pf_func_ovrd);
+
 static struct attribute *cptpf_attrs[] = {
         &dev_attr_kvf_limits.attr,
+        &dev_attr_sso_pf_func_ovrd.attr,
         NULL
 };
 
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index dee0aa60b698..480b3720f15a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -5,6 +5,20 @@
 #include "otx2_cptpf.h"
 #include "rvu_reg.h"
 
+/* Fastpath ipsec opcode with inplace processing */
+#define CPT_INLINE_RX_OPCODE (0x26 | (1 << 6))
+#define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6))
+
+#define cpt_inline_rx_opcode(pdev)                      \
+({                                                      \
+        u8 opcode;                                      \
+        if (is_dev_otx2(pdev))                          \
+                opcode = CPT_INLINE_RX_OPCODE;          \
+        else                                            \
+                opcode = CN10K_CPT_INLINE_RX_OPCODE;    \
+        (opcode);                                       \
+})
+
 /*
  * CPT PF driver version, It will be incremented by 1 for every feature
  * addition in CPT mailbox messages.
@@ -112,6 +126,139 @@ static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
         return 0;
 }
 
+static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
+                                         int sso_pf_func, u8 slot)
+{
+        struct cpt_inline_ipsec_cfg_msg *req;
+        struct pci_dev *pdev = cptpf->pdev;
+
+        req = (struct cpt_inline_ipsec_cfg_msg *)
+              otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
+                                      sizeof(*req), sizeof(struct msg_rsp));
+        if (req == NULL) {
+                dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+                return -EFAULT;
+        }
+        memset(req, 0, sizeof(*req));
+        req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
+        req->hdr.sig = OTX2_MBOX_REQ_SIG;
+        req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+        req->dir = CPT_INLINE_INBOUND;
+        req->slot = slot;
+        req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
+        req->sso_pf_func = sso_pf_func;
+        req->enable = 1;
+
+        return otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
+}
+
+static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
+                                  struct otx2_cpt_rx_inline_lf_cfg *req)
+{
+        struct nix_inline_ipsec_cfg *nix_req;
+        struct pci_dev *pdev = cptpf->pdev;
+        int ret;
+
+        nix_req = (struct nix_inline_ipsec_cfg *)
+                  otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
+                                          sizeof(*nix_req),
+                                          sizeof(struct msg_rsp));
+        if (nix_req == NULL) {
+                dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+                return -EFAULT;
+        }
+        memset(nix_req, 0, sizeof(*nix_req));
+        nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG;
+        nix_req->hdr.sig = OTX2_MBOX_REQ_SIG;
+        nix_req->enable = 1;
+        if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS)
+                nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1;
+        else
+                nix_req->cpt_credit = req->credit - 1;
+        nix_req->gen_cfg.egrp = egrp;
+        if (req->opcode)
+                nix_req->gen_cfg.opcode = req->opcode;
+        else
+                nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
+        nix_req->gen_cfg.param1 = req->param1;
+        nix_req->gen_cfg.param2 = req->param2;
+        nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+        nix_req->inst_qsel.cpt_slot = 0;
+        ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
+        if (ret)
+                return ret;
+
+        if (cptpf->has_cpt1) {
+                ret = send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 1);
+                if (ret)
+                        return ret;
+        }
+
+        return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
+}
+
+static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
+                                             struct mbox_msghdr *req)
+{
+        struct otx2_cpt_rx_inline_lf_cfg *cfg_req;
+        u8 egrp;
+        int ret;
+
+        cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req;
+        if (cptpf->lfs.lfs_num) {
+                dev_err(&cptpf->pdev->dev,
+                        "LF is already configured for RX inline ipsec.\n");
+                return -EEXIST;
+        }
+        /*
+         * Allow LFs to execute requests destined to only grp IE_TYPES and
+         * set queue priority of each LF to high
+         */
+        egrp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_IE_TYPES);
+        if (egrp == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+                dev_err(&cptpf->pdev->dev,
+                        "Engine group for inline ipsec is not available\n");
+                return -ENOENT;
+        }
+
+        otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
+                                &cptpf->afpf_mbox, BLKADDR_CPT0);
+        ret = otx2_cptlf_init(&cptpf->lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO,
+                              1);
+        if (ret) {
+                dev_err(&cptpf->pdev->dev,
+                        "LF configuration failed for RX inline ipsec.\n");
+                return ret;
+        }
+
+        if (cptpf->has_cpt1) {
+                cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
+                otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
+                                        cptpf->reg_base, &cptpf->afpf_mbox,
+                                        BLKADDR_CPT1);
+                ret = otx2_cptlf_init(&cptpf->cpt1_lfs, 1 << egrp,
+                                      OTX2_CPT_QUEUE_HI_PRIO, 1);
+                if (ret) {
+                        dev_err(&cptpf->pdev->dev,
+                                "LF configuration failed for RX inline ipsec.\n");
+                        goto lf_cleanup;
+                }
+                cptpf->rsrc_req_blkaddr = 0;
+        }
+
+        ret = rx_inline_ipsec_lf_cfg(cptpf, egrp, cfg_req);
+        if (ret)
+                goto lf1_cleanup;
+
+        return 0;
+
+lf1_cleanup:
+        otx2_cptlf_shutdown(&cptpf->cpt1_lfs);
+lf_cleanup:
+        otx2_cptlf_shutdown(&cptpf->lfs);
+        return ret;
+}
+
 static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
                                struct otx2_cptvf_info *vf,
                                struct mbox_msghdr *req, int size)
@@ -132,6 +279,10 @@ static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
         case MBOX_MSG_GET_KVF_LIMITS:
                 err = handle_msg_kvf_limits(cptpf, vf, req);
                 break;
+        case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG:
+                err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req);
+                break;
+
         default:
                 err = forward_to_af(cptpf, vf, req, size);
                 break;
@@ -224,14 +375,28 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
 irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
 {
         struct otx2_cptpf_dev *cptpf = arg;
+        struct otx2_mbox_dev *mdev;
+        struct otx2_mbox *mbox;
+        struct mbox_hdr *hdr;
         u64 intr;
 
         /* Read the interrupt bits */
         intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
 
         if (intr & 0x1ULL) {
-                /* Schedule work queue function to process the MBOX request */
-                queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
+                mbox = &cptpf->afpf_mbox;
+                mdev = &mbox->dev[0];
+                hdr = mdev->mbase + mbox->rx_start;
+                if (hdr->num_msgs)
+                        /* Schedule work queue function to process the MBOX request */
+                        queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
+
+                mbox = &cptpf->afpf_mbox_up;
+                mdev = &mbox->dev[0];
+                hdr = mdev->mbase + mbox->rx_start;
+                if (hdr->num_msgs)
+                        /* Schedule work queue function to process the MBOX request */
+                        queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_up_work);
                 /* Clear and ack the interrupt */
                 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
                                  0x1ULL);
@@ -242,6 +407,7 @@ irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
 static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
                                   struct mbox_msghdr *msg)
 {
+        struct otx2_cptlfs_info *lfs = &cptpf->lfs;
         struct device *dev = &cptpf->pdev->dev;
         struct cpt_rd_wr_reg_msg *rsp_rd_wr;
 
@@ -254,6 +420,8 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
                         msg->sig, msg->id);
                 return;
         }
+        if (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1)
+                lfs = &cptpf->cpt1_lfs;
 
         switch (msg->id) {
         case MBOX_MSG_READY:
@@ -273,11 +441,14 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
                 break;
         case MBOX_MSG_ATTACH_RESOURCES:
                 if (!msg->rc)
-                        cptpf->lfs.are_lfs_attached = 1;
+                        lfs->are_lfs_attached = 1;
                 break;
         case MBOX_MSG_DETACH_RESOURCES:
                 if (!msg->rc)
-                        cptpf->lfs.are_lfs_attached = 0;
+                        lfs->are_lfs_attached = 0;
+                break;
+        case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
+        case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
                 break;
 
         default:
@@ -367,3 +538,71 @@ void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
         }
         otx2_mbox_reset(afpf_mbox, 0);
 }
+
+static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev *cptpf,
+                                      struct mbox_msghdr *msg)
+{
+        struct cpt_inst_lmtst_req *req = (struct cpt_inst_lmtst_req *)msg;
+        struct otx2_cptlfs_info *lfs = &cptpf->lfs;
+        struct msg_rsp *rsp;
+
+        if (cptpf->lfs.lfs_num)
+                lfs->ops->send_cmd((union otx2_cpt_inst_s *)req->inst, 1,
+                                   &lfs->lf[0]);
+
+        rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(&cptpf->afpf_mbox_up, 0,
+                                                    sizeof(*rsp));
+        if (!rsp)
+                return;
+
+        rsp->hdr.id = msg->id;
+        rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+        rsp->hdr.pcifunc = 0;
+        rsp->hdr.rc = 0;
+}
+
+static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev *cptpf,
+                                     struct mbox_msghdr *msg)
+{
+        if (msg->id >= MBOX_MSG_MAX) {
+                dev_err(&cptpf->pdev->dev,
+                        "MBOX msg with unknown ID %d\n", msg->id);
+                return;
+        }
+
+        switch (msg->id) {
+        case MBOX_MSG_CPT_INST_LMTST:
+                handle_msg_cpt_inst_lmtst(cptpf, msg);
+                break;
+        default:
+                otx2_reply_invalid_msg(&cptpf->afpf_mbox_up, 0, 0, msg->id);
+        }
+}
+
+void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work)
+{
+        struct otx2_cptpf_dev *cptpf;
+        struct otx2_mbox_dev *mdev;
+        struct mbox_hdr *rsp_hdr;
+        struct mbox_msghdr *msg;
+        struct otx2_mbox *mbox;
+        int offset, i;
+
+        cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work);
+        mbox = &cptpf->afpf_mbox_up;
+        mdev = &mbox->dev[0];
+        /* Sync mbox data into memory */
+        smp_wmb();
+
+        rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+        offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+        for (i = 0; i < rsp_hdr->num_msgs; i++) {
+                msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+                process_afpf_mbox_up_msg(cptpf, msg);
+
+                offset = mbox->rx_start + msg->next_msgoff;
+        }
+        otx2_mbox_msg_send(mbox, 0);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 1577986677f6..1958b797a421 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1504,11 +1504,9 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
         if (ret)
                 goto delete_grps;
 
-        lfs->pdev = pdev;
-        lfs->reg_base = cptpf->reg_base;
-        lfs->mbox = &cptpf->afpf_mbox;
-        lfs->blkaddr = BLKADDR_CPT0;
-        ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
+        otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base,
+                                &cptpf->afpf_mbox, BLKADDR_CPT0);
+        ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
                               OTX2_CPT_QUEUE_HI_PRIO, 1);
         if (ret)
                 goto delete_grps;
@@ -1562,7 +1560,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
 free_result:
         kfree(result);
 lf_cleanup:
-        otx2_cptlf_shutdown(&cptpf->lfs);
+        otx2_cptlf_shutdown(lfs);
 delete_grps:
         delete_engine_grps(pdev, &cptpf->eng_grps);
 
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
index 4207e2236903..994291e90da1 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
@@ -19,6 +19,7 @@ struct otx2_cptvf_dev {
         struct otx2_mbox pfvf_mbox;
         struct work_struct pfvf_mbox_work;
         struct workqueue_struct *pfvf_mbox_wq;
+        int blkaddr;
         void *bbuf_base;
         unsigned long cap_flag;
 };
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 6023a7adb70c..bac729c885f9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -277,12 +277,11 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
         if (ret)
                 return ret;
 
-        lfs->reg_base = cptvf->reg_base;
-        lfs->pdev = cptvf->pdev;
-        lfs->mbox = &cptvf->pfvf_mbox;
-
         lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
                   num_online_cpus();
+
+        otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
+                                &cptvf->pfvf_mbox, cptvf->blkaddr);
         ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
                               lfs_num);
         if (ret)
@@ -380,6 +379,7 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
         if (ret)
                 goto destroy_pfvf_mbox;
 
+        cptvf->blkaddr = BLKADDR_CPT0;
         /* Initialize CPT LFs */
         ret = cptvf_lf_init(cptvf);
         if (ret)
```
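The recurring pattern in the Marvell changes above is that callers now record the target RVU block (CPT0 or CPT1) in otx2_cptlfs_info through the new otx2_cptlf_set_dev_info() helper before calling otx2_cptlf_init(), and the register accessors then use lfs->blkaddr instead of the hard-coded BLKADDR_CPT0. The sketch below condenses that call pattern from the otx2_cptpf_mbox.c and otx2_cptvf_main.c hunks; the wrapper function and its parameter list are illustrative placeholders, not code from the driver, and it assumes the driver-internal headers.

```c
/* Condensed sketch of the new LF-bringup pattern shown in the diff above. */
#include "otx2_cptlf.h"   /* driver-internal header providing the types used here */

static int example_bringup_lfs(struct otx2_cptlfs_info *lfs,
                               struct pci_dev *pdev,
                               void __iomem *reg_base,
                               struct otx2_mbox *mbox,
                               int blkaddr, u8 eng_grp_mask)
{
        /*
         * Record pdev/reg_base/mbox and, crucially, the block address in one
         * place instead of open-coding the four assignments at every caller.
         */
        otx2_cptlf_set_dev_info(lfs, pdev, reg_base, mbox, blkaddr);

        /*
         * From here on, every otx2_cpt_read64()/otx2_cpt_write64() in the LF
         * code targets lfs->blkaddr (CPT0 or CPT1) rather than BLKADDR_CPT0,
         * which is what allows the PF to also drive cpt1_lfs for inline ipsec.
         */
        return otx2_cptlf_init(lfs, eng_grp_mask, OTX2_CPT_QUEUE_HI_PRIO, 1);
}
```

This is why the PF gains a second otx2_cptlfs_info (cpt1_lfs) and the rsrc_req_blkaddr bookkeeping: the same attach/detach mailbox handling can now be steered at either CPT block.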