535.146.02

Bernhard Stoeckner
2023-12-07 15:09:52 +01:00
parent e573018659
commit 7165299dee
77 changed files with 965 additions and 362 deletions

View File

@@ -23,10 +23,16 @@
#include "internal_crypt_lib.h"
#ifdef USE_LKCA
#ifndef NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT
#include <crypto/internal/hash.h>
#endif
#endif
void *lkca_hash_new(const char* alg_name)
{
#ifndef USE_LKCA
return false;
return NULL;
#else
//XXX: can we reuse crypto_shash part and just allocate desc
struct crypto_shash *alg;
@@ -87,9 +93,24 @@ bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src)
struct crypto_shash *src_tfm = src->tfm;
struct crypto_shash *dst_tfm = dst->tfm;
int ss = crypto_shash_statesize(dst_tfm);
#ifdef NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT
char *src_ipad = crypto_tfm_ctx_aligned(&src_tfm->base);
char *dst_ipad = crypto_tfm_ctx_aligned(&dst_tfm->base);
int ss = crypto_shash_statesize(dst_tfm);
#else
int ctx_size = crypto_shash_alg(dst_tfm)->base.cra_ctxsize;
char *src_ipad = crypto_shash_ctx(src_tfm);
char *dst_ipad = crypto_shash_ctx(dst_tfm);
/*
* Actual struct definition is hidden, so I assume data we need is at
the end. In 6.0 the struct has a pointer to crypto_shash followed by:
* 'u8 ipad[statesize];', then 'u8 opad[statesize];'
*/
src_ipad += ctx_size - 2 * ss;
dst_ipad += ctx_size - 2 * ss;
#endif
memcpy(dst_ipad, src_ipad, crypto_shash_blocksize(src->tfm));
memcpy(dst_ipad + ss, src_ipad + ss, crypto_shash_blocksize(src->tfm));
crypto_shash_clear_flags(dst->tfm, CRYPTO_TFM_NEED_KEY);
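
The fallback branch above leans on a layout assumption rather than a public API. A hedged sketch of that assumption follows; the real hmac driver context is private to crypto/hmac.c, so the struct name and fields below are illustrative only and may differ between kernel versions.

/*
 * Hedged sketch only: the hmac driver context is not exported, so this is
 * an assumed shape, not the real definition.
 */
#include <linux/types.h>
#include <crypto/hash.h>

struct assumed_hmac_ctx {
    struct crypto_shash *hash;  /* inner digest handle */
    /* ...possible driver-private fields of unknown size... */
    u8 pads[];                  /* ipad[statesize] immediately followed by
                                   opad[statesize], at the end of cra_ctxsize */
};

/*
 * Under that assumption both pads are reachable from the end of the context,
 * which is exactly what the #else branch above computes:
 *
 *   ipad = crypto_shash_ctx(tfm) + cra_ctxsize - 2 * statesize
 *   opad = ipad + statesize
 */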

View File

@@ -316,14 +316,14 @@ int nvidia_p2p_init_mapping(
return -ENOTSUPP;
}
EXPORT_SYMBOL(nvidia_p2p_init_mapping);
NV_EXPORT_SYMBOL(nvidia_p2p_init_mapping);
int nvidia_p2p_destroy_mapping(uint64_t p2p_token)
{
return -ENOTSUPP;
}
EXPORT_SYMBOL(nvidia_p2p_destroy_mapping);
NV_EXPORT_SYMBOL(nvidia_p2p_destroy_mapping);
static void nv_p2p_mem_info_free_callback(void *data)
{
@@ -587,7 +587,7 @@ int nvidia_p2p_get_pages(
p2p_token, va_space, virtual_address,
length, page_table, free_callback, data);
}
EXPORT_SYMBOL(nvidia_p2p_get_pages);
NV_EXPORT_SYMBOL(nvidia_p2p_get_pages);
int nvidia_p2p_get_pages_persistent(
uint64_t virtual_address,
@@ -605,7 +605,7 @@ int nvidia_p2p_get_pages_persistent(
virtual_address, length, page_table,
NULL, NULL);
}
EXPORT_SYMBOL(nvidia_p2p_get_pages_persistent);
NV_EXPORT_SYMBOL(nvidia_p2p_get_pages_persistent);
/*
* This function is a no-op, but is left in place (for now), in order to allow
@@ -618,7 +618,7 @@ int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table)
return 0;
}
EXPORT_SYMBOL(nvidia_p2p_free_page_table);
NV_EXPORT_SYMBOL(nvidia_p2p_free_page_table);
int nvidia_p2p_put_pages(
uint64_t p2p_token,
@@ -650,7 +650,7 @@ int nvidia_p2p_put_pages(
return nvidia_p2p_map_status(status);
}
EXPORT_SYMBOL(nvidia_p2p_put_pages);
NV_EXPORT_SYMBOL(nvidia_p2p_put_pages);
int nvidia_p2p_put_pages_persistent(
uint64_t virtual_address,
@@ -690,7 +690,7 @@ int nvidia_p2p_put_pages_persistent(
return nvidia_p2p_map_status(status);
}
EXPORT_SYMBOL(nvidia_p2p_put_pages_persistent);
NV_EXPORT_SYMBOL(nvidia_p2p_put_pages_persistent);
int nvidia_p2p_dma_map_pages(
struct pci_dev *peer,
@@ -805,7 +805,7 @@ failed:
return nvidia_p2p_map_status(status);
}
EXPORT_SYMBOL(nvidia_p2p_dma_map_pages);
NV_EXPORT_SYMBOL(nvidia_p2p_dma_map_pages);
int nvidia_p2p_dma_unmap_pages(
struct pci_dev *peer,
@@ -845,7 +845,7 @@ int nvidia_p2p_dma_unmap_pages(
return 0;
}
EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages);
NV_EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages);
/*
* This function is a no-op, but is left in place (for now), in order to allow
@@ -860,7 +860,7 @@ int nvidia_p2p_free_dma_mapping(
return 0;
}
EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping);
NV_EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping);
int nvidia_p2p_register_rsync_driver(
nvidia_p2p_rsync_driver_t *driver,
@@ -889,7 +889,7 @@ int nvidia_p2p_register_rsync_driver(
driver->wait_for_rsync, data);
}
EXPORT_SYMBOL(nvidia_p2p_register_rsync_driver);
NV_EXPORT_SYMBOL(nvidia_p2p_register_rsync_driver);
void nvidia_p2p_unregister_rsync_driver(
nvidia_p2p_rsync_driver_t *driver,
@@ -921,7 +921,7 @@ void nvidia_p2p_unregister_rsync_driver(
driver->wait_for_rsync, data);
}
EXPORT_SYMBOL(nvidia_p2p_unregister_rsync_driver);
NV_EXPORT_SYMBOL(nvidia_p2p_unregister_rsync_driver);
int nvidia_p2p_get_rsync_registers(
nvidia_p2p_rsync_reg_info_t **reg_info
@@ -1014,7 +1014,7 @@ int nvidia_p2p_get_rsync_registers(
return 0;
}
EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers);
NV_EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers);
void nvidia_p2p_put_rsync_registers(
nvidia_p2p_rsync_reg_info_t *reg_info
@@ -1046,4 +1046,4 @@ void nvidia_p2p_put_rsync_registers(
os_free_mem(reg_info);
}
EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers);
NV_EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers);
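
The hunks above only rename the export sites; the NV_EXPORT_SYMBOL definition itself is not part of this excerpt. As a hedged sketch, assuming the simplest possible shape, such a wrapper would just forward to the kernel's export macro, giving the driver a single point where export policy can later be adjusted. The actual definition elsewhere in the tree may add conditions.

/*
 * Hedged sketch: NV_EXPORT_SYMBOL is defined elsewhere in the driver tree
 * and is not shown in this diff. A minimal forwarding wrapper would be:
 */
#include <linux/export.h>

#if !defined(NV_EXPORT_SYMBOL)
#define NV_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol)
#endif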

View File

@@ -1224,12 +1224,11 @@ static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)
rm_read_registry_dword(sp, nv, NV_REG_ENABLE_MSI, &msi_config);
if (msi_config == 1)
{
if (pci_find_capability(nvl->pci_dev, PCI_CAP_ID_MSIX))
if (nvl->pci_dev->msix_cap && rm_is_msix_allowed(sp, nv))
{
nv_init_msix(nv);
}
if (pci_find_capability(nvl->pci_dev, PCI_CAP_ID_MSI) &&
!(nv->flags & NV_FLAG_USES_MSIX))
if (nvl->pci_dev->msi_cap && !(nv->flags & NV_FLAG_USES_MSIX))
{
nv_init_msi(nv);
}
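
For context, both forms of the check test for the same PCI capability; the new code reads the offset that the PCI core caches in struct pci_dev at enumeration time instead of re-walking config space on every call, and additionally gates MSI-X behind the new rm_is_msix_allowed() check shown above. A minimal illustration with hypothetical helper names:

#include <linux/pci.h>

/* Old style: walks the capability list in PCI config space on each call. */
static bool gpu_has_msix_by_scan(struct pci_dev *dev)
{
    return pci_find_capability(dev, PCI_CAP_ID_MSIX) != 0;
}

/* New style: pci_dev::msix_cap holds the capability offset cached by the
 * PCI core during enumeration; a non-zero value means MSI-X is present. */
static bool gpu_has_msix_by_cache(struct pci_dev *dev)
{
    return dev->msix_cap != 0;
}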

View File

@@ -195,6 +195,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_task_ioprio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mdev_set_iommu_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += offline_and_remove_memory
NV_CONFTEST_FUNCTION_COMPILE_TESTS += crypto_tfm_ctx_aligned
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active
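
The new crypto_tfm_ctx_aligned entry drives the NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT guard used in the shash hunks above. The generated probe is not part of this excerpt; a hedged sketch of what such a function compile test boils down to follows, with the header location and probe name assumed rather than taken from conftest.sh.

/*
 * Hedged sketch: a throwaway translation unit of the kind a function compile
 * test reduces to. If this compiles, crypto_tfm_ctx_aligned() exists and the
 * NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT define is emitted; on kernels that have
 * removed the helper, compilation fails and the fallback path is taken.
 */
#include <crypto/algapi.h>

void conftest_crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
    (void)crypto_tfm_ctx_aligned(tfm);
}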