@@ -20,6 +20,63 @@
#define SDAP_BITS_SIZE (SDAP_BYTE_SIZE * 8)
#endif
+/**
+ * rta_inline_pdcp_sdap_query() - Provide indications if a key can be passed as
+ * immediate data or shall be referenced in a
+ * shared descriptor.
+ * Return: number of keys to inline: 0 (reference both keys), 1 (inline the
+ * cipher key only) or 2 (inline both keys).
+ */
+static inline int
+rta_inline_pdcp_sdap_query(enum auth_type_pdcp auth_alg,
+ enum cipher_type_pdcp cipher_alg,
+ enum pdcp_sn_size sn_size,
+ int8_t hfn_ovd)
+{
+ int nb_key_to_inline = 0;
+
+ if ((cipher_alg != PDCP_CIPHER_TYPE_NULL) &&
+ (auth_alg != PDCP_AUTH_TYPE_NULL))
+ return 2; /* inline both keys; NOTE: makes the code below unreachable */
+ else
+ return 0;
+
+ /**
+ * NOTE(review): everything from here down is dead code because of the
+ * unconditional return above — confirm whether it is kept on purpose.
+ * Shared Descriptors for some of the cases do not fit in the
+ * MAX_DESC_SIZE of the descriptor.
+ * The cases which exceed are for RTA_SEC_ERA=8 and HFN override
+ * enabled and 12/18 bit uplane and either of following Algo combo.
+ * - AES-SNOW
+ * - AES-ZUC
+ * - SNOW-SNOW
+ * - SNOW-ZUC
+ * - ZUC-SNOW
+ * - ZUC-ZUC
+ *
+ * We cannot make inline for all cases, as this will impact performance
+ * due to extra memory accesses for the keys.
+ */
+
+ /* Inline only the cipher key */
+ if ((rta_sec_era == RTA_SEC_ERA_8) && hfn_ovd &&
+ ((sn_size == PDCP_SN_SIZE_12) ||
+ (sn_size == PDCP_SN_SIZE_18)) &&
+ (cipher_alg != PDCP_CIPHER_TYPE_NULL) &&
+ ((auth_alg == PDCP_AUTH_TYPE_SNOW) ||
+ (auth_alg == PDCP_AUTH_TYPE_ZUC))) {
+
+ nb_key_to_inline++;
+
+ /* Sub case where inlining another key is required */
+ if ((cipher_alg == PDCP_CIPHER_TYPE_AES) &&
+ (auth_alg == PDCP_AUTH_TYPE_SNOW))
+ nb_key_to_inline++;
+ }
+
+ /* Number of keys to inline: 0, 1 or 2 */
+
+ return nb_key_to_inline;
+}
+
static inline void key_loading_opti(struct program *p,
struct alginfo *cipherdata,
struct alginfo *authdata)
@@ -788,8 +845,8 @@ pdcp_sdap_insert_cplane_null_op(struct program *p,
unsigned char era_2_sw_hfn_ovrd,
enum pdb_type_e pdb_type __maybe_unused)
{
- return pdcp_insert_cplane_int_only_op(p, swap, cipherdata, authdata,
- dir, sn_size, era_2_sw_hfn_ovrd);
+ return pdcp_insert_cplane_null_op(p, swap, cipherdata, authdata, dir,
+ sn_size, era_2_sw_hfn_ovrd);
}
static inline int
@@ -3261,12 +3261,28 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
goto out;
}
- if (rta_inline_pdcp_query(authdata.algtype,
- cipherdata.algtype,
- session->pdcp.sn_size,
- session->pdcp.hfn_ovd)) {
- cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
- cipherdata.key_type = RTA_DATA_PTR;
+ if (pdcp_xform->sdap_enabled) {
+ int nb_keys_to_inline =
+ rta_inline_pdcp_sdap_query(authdata.algtype,
+ cipherdata.algtype,
+ session->pdcp.sn_size,
+ session->pdcp.hfn_ovd);
+ if (nb_keys_to_inline >= 1) {
+ cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
+ if (nb_keys_to_inline >= 2) {
+ authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
+ authdata.key_type = RTA_DATA_PTR;
+ }
+ } else {
+ if (rta_inline_pdcp_query(authdata.algtype,
+ cipherdata.algtype,
+ session->pdcp.sn_size,
+ session->pdcp.hfn_ovd)) {
+ cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
}
if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {