mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git
synced 2026-02-01 05:59:48 +00:00
550.40.07
@@ -7,6 +7,8 @@
 #ifndef CRYPTLIB_CERT_H
 #define CRYPTLIB_CERT_H

+#if LIBSPDM_CERT_PARSE_SUPPORT
+
 /**
  * Retrieve the tag and length of the tag.
  *
@@ -199,7 +201,7 @@ extern bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size, si
  * @param[in] cert Pointer to the DER-encoded X509 certificate.
  * @param[in] cert_size Size of the X509 certificate in bytes.
  * @param[out] usage Key usage bytes.
- * @param[in, out] usage_size Key usage buffer sizs in bytes.
+ * @param[in, out] usage_size Key usage buffer size in bytes.
  *
  * @retval true
  * @retval false
@@ -214,7 +216,7 @@ extern bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
  * @param[in] cert Pointer to the DER-encoded X509 certificate.
  * @param[in] cert_size Size of the X509 certificate in bytes.
  * @param[out] basic_constraints Basic constraints bytes.
- * @param[in, out] basic_constraints_size Basic constraints buffer sizs in bytes.
+ * @param[in, out] basic_constraints_size Basic constraints buffer size in bytes.
  *
  * @retval true
  * @retval false
@@ -250,16 +252,16 @@ extern bool libspdm_x509_verify_cert(const uint8_t *cert, size_t cert_size,
  *
  * @param[in] cert_chain One or more ASN.1 DER-encoded X.509 certificates
  *                       where the first certificate is signed by the Root
- *                       Certificate or is the Root Cerificate itself. and
- *                       subsequent cerificate is signed by the preceding
- *                       cerificate.
+ *                       Certificate or is the Root Certificate itself. and
+ *                       subsequent certificate is signed by the preceding
+ *                       certificate.
  * @param[in] cert_chain_length Total length of the certificate chain, in bytes.
  *
  * @param[in] root_cert Trusted Root Certificate buffer.
  *
  * @param[in] root_cert_length Trusted Root Certificate buffer length.
  *
- * @retval true All cerificates were issued by the first certificate in X509Certchain.
+ * @retval true All certificates were issued by the first certificate in X509Certchain.
  * @retval false Invalid certificate or the certificate was not issued by the given
  *               trusted CA.
  **/
@@ -272,12 +274,12 @@ extern bool libspdm_x509_verify_cert_chain(const uint8_t *root_cert, size_t root
  *
  * @param[in] cert_chain One or more ASN.1 DER-encoded X.509 certificates
  *                       where the first certificate is signed by the Root
- *                       Certificate or is the Root Cerificate itself. and
- *                       subsequent cerificate is signed by the preceding
- *                       cerificate.
+ *                       Certificate or is the Root Certificate itself. and
+ *                       subsequent certificate is signed by the preceding
+ *                       certificate.
  * @param[in] cert_chain_length Total length of the certificate chain, in bytes.
  *
- * @param[in] cert_index Index of certificate. If index is -1 indecate the
+ * @param[in] cert_index Index of certificate. If index is -1 indicates the
  *                       last certificate in cert_chain.
  *
  * @param[out] cert The certificate at the index of cert_chain.
@@ -301,7 +303,7 @@ extern bool libspdm_x509_get_cert_from_cert_chain(const uint8_t *cert_chain,
  *
  * @param[in] cert Pointer to the DER-encoded X509 certificate.
  * @param[in] cert_size Size of the X509 certificate in bytes.
- * @param[out] rsa_context Pointer to new-generated RSA context which contain the retrieved
+ * @param[out] rsa_context Pointer to newly generated RSA context which contain the retrieved
  *                         RSA public key component. Use libspdm_rsa_free() function to free the
  *                         resource.
  *
@@ -319,7 +321,7 @@ extern bool libspdm_rsa_get_public_key_from_x509(const uint8_t *cert, size_t cer
  *
  * @param[in] cert Pointer to the DER-encoded X509 certificate.
  * @param[in] cert_size Size of the X509 certificate in bytes.
- * @param[out] ec_context Pointer to new-generated EC DSA context which contain the retrieved
+ * @param[out] ec_context Pointer to newly generated EC DSA context which contain the retrieved
  *                        EC public key component. Use libspdm_ec_free() function to free the
  *                        resource.
  *
@@ -340,7 +342,7 @@ extern bool libspdm_ec_get_public_key_from_x509(const uint8_t *cert, size_t cert
  *
  * @param[in] cert Pointer to the DER-encoded X509 certificate.
  * @param[in] cert_size Size of the X509 certificate in bytes.
- * @param[out] ecd_context Pointer to new-generated Ed DSA context which contain the retrieved
+ * @param[out] ecd_context Pointer to newly generated Ed DSA context which contain the retrieved
  *                         Ed public key component. Use libspdm_ecd_free() function to free the
  *                         resource.
  *
@@ -361,7 +363,7 @@ extern bool libspdm_ecd_get_public_key_from_x509(const uint8_t *cert, size_t cer
  *
  * @param[in] cert Pointer to the DER-encoded X509 certificate.
  * @param[in] cert_size Size of the X509 certificate in bytes.
- * @param[out] sm2_context Pointer to new-generated sm2 context which contain the retrieved
+ * @param[out] sm2_context Pointer to newly generated sm2 context which contain the retrieved
  *                         sm2 public key component. Use sm2_free() function to free the
  *                         resource.
  *
@@ -376,41 +378,6 @@ extern bool libspdm_sm2_get_public_key_from_x509(const uint8_t *cert, size_t cer
                                                  void **sm2_context);
 #endif /* LIBSPDM_SM2_DSA_SUPPORT */

-#if LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP
-/**
- * Generate a CSR.
- *
- * @param[in] hash_nid hash algo for sign
- * @param[in] asym_nid asym algo for sign
- *
- * @param[in] requester_info requester info to gen CSR
- * @param[in] requester_info_length The len of requester info
- *
- * @param[in] context Pointer to asymmetric context
- * @param[in] subject_name Subject name: should be break with ',' in the middle
- *                         example: "C=AA,CN=BB"
- *
- * Subject names should contain a comma-separated list of OID types and values:
- * The valid OID type name is in:
- * {"CN", "commonName", "C", "countryName", "O", "organizationName","L",
- * "OU", "organizationalUnitName", "ST", "stateOrProvinceName", "emailAddress",
- * "serialNumber", "postalAddress", "postalCode", "dnQualifier", "title",
- * "SN","givenName","GN", "initials", "pseudonym", "generationQualifier", "domainComponent", "DC"}.
- * Note: The object of C and countryName should be CSR Supported Country Codes
- *
- * @param[in] csr_len For input, csr_len is the size of store CSR buffer.
- *                    For output, csr_len is CSR len for DER format
- * @param[in] csr_pointer For input, csr_pointer is buffer address to store CSR.
- *                        For output, csr_pointer is address for stored CSR.
- *                        The csr_pointer address will be changed.
- *
- * @retval true Success.
- * @retval false Failed to gen CSR.
- **/
-extern bool libspdm_gen_x509_csr(size_t hash_nid, size_t asym_nid,
-                                 uint8_t *requester_info, size_t requester_info_length,
-                                 void *context, char *subject_name,
-                                 size_t *csr_len, uint8_t **csr_pointer);
-#endif /* LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP */
+#endif /* LIBSPDM_CERT_PARSE_SUPPORT */

 #endif /* CRYPTLIB_CERT_H */

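A minimal usage sketch of the CSR API documented above (illustrative only, not part of the commit; NID values and the buffer size are placeholders, and `context` is assumed to already hold a signing key):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    bool example_gen_csr(void *context, size_t hash_nid, size_t asym_nid)
    {
        uint8_t csr_buf[0x1000];           /* storage for the DER-encoded CSR */
        uint8_t *csr_ptr = csr_buf;        /* callee may move this address    */
        size_t csr_len = sizeof(csr_buf);  /* in: capacity; out: DER length   */
        char subject[] = "C=AA,CN=BB";     /* comma-separated OID list above  */

        return libspdm_gen_x509_csr(hash_nid, asym_nid,
                                    NULL, 0,  /* no extra requester info */
                                    context, subject,
                                    &csr_len, &csr_ptr);
    }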
@@ -28,6 +28,44 @@ extern void *libspdm_ec_new_by_nid(size_t nid);
  * @param[in] ec_context Pointer to the EC context to be released.
  **/
 extern void libspdm_ec_free(void *ec_context);

+#if LIBSPDM_FIPS_MODE
+/**
+ * Sets the private key component into the established EC context.
+ *
+ * For P-256, the private_key_size is 32 byte.
+ * For P-384, the private_key_size is 48 byte.
+ * For P-521, the private_key_size is 66 byte.
+ *
+ * @param[in, out] ec_context Pointer to EC context being set.
+ * @param[in] private_key Pointer to the private key buffer.
+ * @param[in] private_key_size The size of private key buffer in bytes.
+ *
+ * @retval true EC private key component was set successfully.
+ * @retval false Invalid EC private key component.
+ *
+ **/
+extern bool libspdm_ec_set_priv_key(void *ec_context, const uint8_t *private_key,
+                                    size_t private_key_size);
+
+/**
+ * Sets the public key component into the established EC context.
+ *
+ * For P-256, the public_size is 64. first 32-byte is X, second 32-byte is Y.
+ * For P-384, the public_size is 96. first 48-byte is X, second 48-byte is Y.
+ * For P-521, the public_size is 132. first 66-byte is X, second 66-byte is Y.
+ *
+ * @param[in, out] ec_context Pointer to EC context being set.
+ * @param[in] public Pointer to the buffer to receive generated public X,Y.
+ * @param[in] public_size The size of public buffer in bytes.
+ *
+ * @retval true EC public key component was set successfully.
+ * @retval false Invalid EC public key component.
+ **/
+extern bool libspdm_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
+                                   size_t public_key_size);
+#endif /* LIBSPDM_FIPS_MODE */
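A minimal sketch of the X||Y buffer layout described above, for P-256 (illustrative only; the helper name is hypothetical):

    #include <string.h>

    bool example_set_p256_pub_key(void *ec_context,
                                  const uint8_t x[32], const uint8_t y[32])
    {
        uint8_t pub[64];              /* public_size is 64 for P-256  */
        memcpy(pub, x, 32);           /* first 32 bytes: X coordinate */
        memcpy(pub + 32, y, 32);      /* next 32 bytes: Y coordinate  */
        return libspdm_ec_set_pub_key(ec_context, pub, sizeof(pub));
    }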

 #endif /* (LIBSPDM_ECDHE_SUPPORT) || (LIBSPDM_ECDSA_SUPPORT) */

 #if LIBSPDM_ECDHE_SUPPORT
@@ -99,6 +137,29 @@ extern bool libspdm_ec_compute_key(void *ec_context, const uint8_t *peer_public,
 #endif /* LIBSPDM_ECDHE_SUPPORT */

 #if LIBSPDM_ECDSA_SUPPORT
+/**
+ * Generates Elliptic Curve context from DER-encoded public key data.
+ *
+ * The public key is ASN.1 DER-encoded as RFC7250 describes,
+ * namely, the SubjectPublicKeyInfo structure of a X.509 certificate.
+ *
+ * @param[in] der_data Pointer to the DER-encoded public key data.
+ * @param[in] der_size Size of the DER-encoded public key data in bytes.
+ * @param[out] ec_context Pointer to newly generated EC context which contains the
+ *                        EC public key component.
+ *                        Use libspdm_ec_free() function to free the resource.
+ *
+ * If der_data is NULL, then return false.
+ * If ec_context is NULL, then return false.
+ *
+ * @retval true EC context was generated successfully.
+ * @retval false Invalid DER public key data.
+ *
+ **/
+extern bool libspdm_ec_get_public_key_from_der(const uint8_t *der_data,
+                                               size_t der_size,
+                                               void **ec_context);
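A hedged usage sketch for the DER import declared above, paired with libspdm_ec_free() as the comment requires (illustrative only):

    bool example_verify_with_der_key(const uint8_t *der_data, size_t der_size)
    {
        void *ec_context = NULL;

        if (!libspdm_ec_get_public_key_from_der(der_data, der_size, &ec_context)) {
            return false;              /* invalid DER public key data */
        }
        /* ... use ec_context, e.g. for signature verification ... */
        libspdm_ec_free(ec_context);   /* release as documented above */
        return true;
    }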

 /**
  * Carries out the EC-DSA signature.
  *
@@ -132,6 +193,29 @@ extern bool libspdm_ecdsa_sign(void *ec_context, size_t hash_nid,
                                const uint8_t *message_hash, size_t hash_size,
                                uint8_t *signature, size_t *sig_size);

+#if LIBSPDM_FIPS_MODE
+/**
+ * Carries out the EC-DSA signature with caller input random function. This API can be used for FIPS test.
+ *
+ * @param[in] ec_context Pointer to EC context for signature generation.
+ * @param[in] hash_nid hash NID
+ * @param[in] message_hash Pointer to octet message hash to be signed.
+ * @param[in] hash_size Size of the message hash in bytes.
+ * @param[out] signature Pointer to buffer to receive EC-DSA signature.
+ * @param[in, out] sig_size On input, the size of signature buffer in bytes.
+ *                          On output, the size of data returned in signature buffer in bytes.
+ * @param[in] random_func random number function
+ *
+ * @retval true signature successfully generated in EC-DSA.
+ * @retval false signature generation failed.
+ * @retval false sig_size is too small.
+ **/
+extern bool libspdm_ecdsa_sign_ex(void *ec_context, size_t hash_nid,
+                                  const uint8_t *message_hash, size_t hash_size,
+                                  uint8_t *signature, size_t *sig_size,
+                                  int (*random_func)(void *, unsigned char *, size_t));
+#endif/*LIBSPDM_FIPS_MODE*/
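For FIPS known-answer tests the caller supplies the randomness through random_func. A hypothetical, deterministic stub matching the pointer type above (test-only, never for production; the 0-on-success return convention is an assumption):

    static int kat_random_func(void *ctx, unsigned char *out, size_t len)
    {
        const unsigned char *seed = (const unsigned char *)ctx;  /* 32-byte test vector */
        size_t i;

        for (i = 0; i < len; i++) {
            out[i] = seed[i % 32];  /* repeat the fixed test bytes */
        }
        return 0;                   /* assumed success return code */
    }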

 /**
  * Verifies the EC-DSA signature.
  *

@@ -22,6 +22,29 @@
 **/
 extern void *libspdm_ecd_new_by_nid(size_t nid);

+/**
+ * Generates Edwards-Curve context from DER-encoded public key data.
+ *
+ * The public key is ASN.1 DER-encoded as RFC7250 describes,
+ * namely, the SubjectPublicKeyInfo structure of a X.509 certificate.
+ *
+ * @param[in] der_data Pointer to the DER-encoded public key data.
+ * @param[in] der_size Size of the DER-encoded public key data in bytes.
+ * @param[out] ec_context Pointer to newly generated Ed context which contains the
+ *                        Ed public key component.
+ *                        Use libspdm_ecd_free() function to free the resource.
+ *
+ * If der_data is NULL, then return false.
+ * If ecd_context is NULL, then return false.
+ *
+ * @retval true Ed context was generated successfully.
+ * @retval false Invalid DER public key data.
+ *
+ **/
+extern bool libspdm_ecd_get_public_key_from_der(const uint8_t *der_data,
+                                                size_t der_size,
+                                                void **ecd_context);
+
 /**
  * Release the specified Ed context.
  *
@@ -29,6 +52,56 @@ extern void *libspdm_ecd_new_by_nid(size_t nid);
 **/
 extern void libspdm_ecd_free(void *ecd_context);

+/**
+ * Sets the public key component into the established Ed context.
+ *
+ * For ed25519, the public_size is 32.
+ * For ed448, the public_size is 57.
+ *
+ * @param[in, out] ecd_context Pointer to Ed context being set.
+ * @param[in] public_key Pointer to the buffer to receive generated public X,Y.
+ * @param[in] public_size The size of public buffer in bytes.
+ *
+ * @retval true Ed public key component was set successfully.
+ * @retval false Invalid EC public key component.
+ **/
+extern bool libspdm_ecd_set_pub_key(void *ecd_context, const uint8_t *public_key,
+                                    size_t public_key_size);
+
+/**
+ * Sets the private key component into the established Ed context.
+ *
+ * For ed25519, the private_size is 32.
+ * For ed448, the private_size is 57.
+ *
+ * @param[in, out] ecd_context Pointer to Ed context being set.
+ * @param[in] private Pointer to the buffer to receive generated private X,Y.
+ * @param[in] private_size The size of private buffer in bytes.
+ *
+ * @retval true Ed private key component was set successfully.
+ * @retval false Invalid EC private key component.
+ *
+ **/
+bool libspdm_ecd_set_pri_key(void *ecd_context, const uint8_t *private_key,
+                             size_t private_key_size);
+
+/**
+ * Gets the public key component from the established Ed context.
+ *
+ * For ed25519, the public_size is 32.
+ * For ed448, the public_size is 57.
+ *
+ * @param[in, out] ecd_context Pointer to Ed context being set.
+ * @param[out] public Pointer to the buffer to receive generated public X,Y.
+ * @param[in, out] public_size On input, the size of public buffer in bytes.
+ *                             On output, the size of data returned in public buffer in bytes.
+ *
+ * @retval true Ed key component was retrieved successfully.
+ * @retval false Invalid EC public key component.
+ **/
+extern bool libspdm_ecd_get_pub_key(void *ecd_context, uint8_t *public_key,
+                                    size_t *public_key_size);
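A minimal sketch of the in/out size contract documented above, for ed25519 (illustrative only):

    bool example_get_ed25519_pub_key(void *ecd_context)
    {
        uint8_t pub[32];                /* 32 for ed25519, 57 for ed448 */
        size_t pub_size = sizeof(pub);  /* in: buffer capacity          */

        if (!libspdm_ecd_get_pub_key(ecd_context, pub, &pub_size)) {
            return false;
        }
        return pub_size == 32;          /* out: bytes actually returned */
    }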

 /**
  * Carries out the Ed-DSA signature.
  *

@@ -35,6 +35,28 @@ typedef enum {
 **/
 extern void *libspdm_rsa_new(void);

+/**
+ * Generates RSA context from DER-encoded public key data.
+ *
+ * The public key is ASN.1 DER-encoded as RFC7250 describes,
+ * namely, the SubjectPublicKeyInfo structure of a X.509 certificate.
+ *
+ * @param[in] der_data Pointer to the DER-encoded public key data.
+ * @param[in] der_size Size of the DER-encoded public key data in bytes.
+ * @param[out] rsa_context Pointer to newly generated RSA context which contains the
+ *                         RSA public key component.
+ *                         Use libspdm_rsa_free() function to free the resource.
+ *
+ * If der_data is NULL, then return false.
+ * If rsa_context is NULL, then return false.
+ *
+ * @retval true RSA context was generated successfully.
+ * @retval false Invalid DER public key data.
+ **/
+extern bool libspdm_rsa_get_public_key_from_der(const uint8_t *der_data,
+                                                size_t der_size,
+                                                void **rsa_context);
+
 /**
  * Release the specified RSA context.
  *
@@ -67,80 +89,6 @@ extern void libspdm_rsa_free(void *rsa_context);
 extern bool libspdm_rsa_set_key(void *rsa_context, const libspdm_rsa_key_tag_t key_tag,
                                 const uint8_t *big_number, size_t bn_size);

-/**
- * Gets the tag-designated RSA key component from the established RSA context.
- *
- * This function retrieves the tag-designated RSA key component from the
- * established RSA context as a non-negative integer (octet string format
- * represented in RSA PKCS#1).
- * If specified key component has not been set or has been cleared, then returned
- * bn_size is set to 0.
- * If the big_number buffer is too small to hold the contents of the key, false
- * is returned and bn_size is set to the required buffer size to obtain the key.
- *
- * If rsa_context is NULL, then return false.
- * If bn_size is NULL, then return false.
- * If bn_size is large enough but big_number is NULL, then return false.
- * If this interface is not supported, then return false.
- *
- * @param[in, out] rsa_context Pointer to RSA context being set.
- * @param[in] key_tag Tag of RSA key component being set.
- * @param[out] big_number Pointer to octet integer buffer.
- * @param[in, out] bn_size On input, the size of big number buffer in bytes.
- *                         On output, the size of data returned in big number buffer in bytes.
- *
- * @retval true RSA key component was retrieved successfully.
- * @retval false Invalid RSA key component tag.
- * @retval false bn_size is too small.
- * @retval false This interface is not supported.
- **/
-extern bool libspdm_rsa_get_key(void *rsa_context, const libspdm_rsa_key_tag_t key_tag,
-                                uint8_t *big_number, size_t *bn_size);
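The declaration above (removed by this commit) documents a two-call size-query contract; a hypothetical sketch of that contract (illustrative only):

    #include <stdlib.h>

    bool example_get_rsa_component(void *rsa_context, libspdm_rsa_key_tag_t tag)
    {
        size_t bn_size = 0;
        uint8_t *buf;
        bool ok;

        /* A first call with no buffer fails and reports the required
         * size in bn_size, per the documentation above. */
        if (libspdm_rsa_get_key(rsa_context, tag, NULL, &bn_size)) {
            return true;               /* nothing to fetch */
        }
        if (bn_size == 0) {
            return false;              /* component not set */
        }
        buf = malloc(bn_size);
        if (buf == NULL) {
            return false;
        }
        ok = libspdm_rsa_get_key(rsa_context, tag, buf, &bn_size);
        free(buf);
        return ok;
    }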

-/**
- * Generates RSA key components.
- *
- * This function generates RSA key components. It takes RSA public exponent E and
- * length in bits of RSA modulus N as input, and generates all key components.
- * If public_exponent is NULL, the default RSA public exponent (0x10001) will be used.
- *
- * If rsa_context is NULL, then return false.
- * If this interface is not supported, then return false.
- *
- * @param[in, out] rsa_context Pointer to RSA context being set.
- * @param[in] modulus_length Length of RSA modulus N in bits.
- * @param[in] public_exponent Pointer to RSA public exponent.
- * @param[in] public_exponent_size Size of RSA public exponent buffer in bytes.
- *
- * @retval true RSA key component was generated successfully.
- * @retval false Invalid RSA key component tag.
- * @retval false This interface is not supported.
- **/
-extern bool libspdm_rsa_generate_key(void *rsa_context, size_t modulus_length,
-                                     const uint8_t *public_exponent,
-                                     size_t public_exponent_size);
-
-/**
- * Validates key components of RSA context.
- * NOTE: This function performs integrity checks on all the RSA key material, so
- * the RSA key structure must contain all the private key data.
- *
- * This function validates key components of RSA context in following aspects:
- * - Whether p is a prime
- * - Whether q is a prime
- * - Whether n = p * q
- * - Whether d*e = 1 mod lcm(p-1,q-1)
- *
- * If rsa_context is NULL, then return false.
- * If this interface is not supported, then return false.
- *
- * @param[in] rsa_context Pointer to RSA context to check.
- *
- * @retval true RSA key components are valid.
- * @retval false RSA key components are not valid.
- * @retval false This interface is not supported.
- **/
-extern bool libspdm_rsa_check_key(void *rsa_context);
 #endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */

 #if LIBSPDM_RSA_SSA_SUPPORT
@@ -260,5 +208,67 @@ extern bool libspdm_rsa_pss_sign(void *rsa_context, size_t hash_nid,
 extern bool libspdm_rsa_pss_verify(void *rsa_context, size_t hash_nid,
                                    const uint8_t *message_hash, size_t hash_size,
                                    const uint8_t *signature, size_t sig_size);

+#if LIBSPDM_FIPS_MODE
+/**
+ * Carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme for FIPS test.
+ *
+ * This function carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme defined in
+ * RSA PKCS#1 v2.2 for FIPS test.
+ *
+ * The salt length is zero.
+ *
+ * If the signature buffer is too small to hold the contents of signature, false
+ * is returned and sig_size is set to the required buffer size to obtain the signature.
+ *
+ * If rsa_context is NULL, then return false.
+ * If message_hash is NULL, then return false.
+ * If hash_size need match the hash_nid. nid could be SHA256, SHA384, SHA512, SHA3_256, SHA3_384, SHA3_512.
+ * If sig_size is large enough but signature is NULL, then return false.
+ *
+ * @param[in] rsa_context Pointer to RSA context for signature generation.
+ * @param[in] hash_nid hash NID
+ * @param[in] message_hash Pointer to octet message hash to be signed.
+ * @param[in] hash_size size of the message hash in bytes.
+ * @param[out] signature Pointer to buffer to receive RSA-SSA PSS signature.
+ * @param[in, out] sig_size On input, the size of signature buffer in bytes.
+ *                          On output, the size of data returned in signature buffer in bytes.
+ *
+ * @retval true signature successfully generated in RSA-SSA PSS.
+ * @retval false signature generation failed.
+ * @retval false sig_size is too small.
+ *
+ **/
+extern bool libspdm_rsa_pss_sign_fips(void *rsa_context, size_t hash_nid,
+                                      const uint8_t *message_hash, size_t hash_size,
+                                      uint8_t *signature, size_t *sig_size);
+
+/**
+ * Verifies the RSA-SSA signature with EMSA-PSS encoding scheme defined in
+ * RSA PKCS#1 v2.2 for FIPS test.
+ *
+ * The salt length is zero.
+ *
+ * If rsa_context is NULL, then return false.
+ * If message_hash is NULL, then return false.
+ * If signature is NULL, then return false.
+ * If hash_size need match the hash_nid. nid could be SHA256, SHA384, SHA512, SHA3_256, SHA3_384, SHA3_512.
+ *
+ * @param[in] rsa_context Pointer to RSA context for signature verification.
+ * @param[in] hash_nid hash NID
+ * @param[in] message_hash Pointer to octet message hash to be checked.
+ * @param[in] hash_size size of the message hash in bytes.
+ * @param[in] signature Pointer to RSA-SSA PSS signature to be verified.
+ * @param[in] sig_size size of signature in bytes.
+ *
+ * @retval true Valid signature encoded in RSA-SSA PSS.
+ * @retval false Invalid signature or invalid RSA context.
+ *
+ **/
+extern bool libspdm_rsa_pss_verify_fips(void *rsa_context, size_t hash_nid,
+                                        const uint8_t *message_hash, size_t hash_size,
+                                        const uint8_t *signature, size_t sig_size);
+#endif /*LIBSPDM_FIPS_MODE*/
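A minimal round-trip sketch for the FIPS-mode pair declared above, over a precomputed SHA-256 digest (illustrative only; the NID constant name is assumed from libspdm's crypto NID definitions):

    bool example_pss_fips_roundtrip(void *rsa_context, const uint8_t digest[32])
    {
        uint8_t sig[512];               /* large enough for RSA-4096      */
        size_t sig_size = sizeof(sig);  /* in: capacity; out: actual size */

        if (!libspdm_rsa_pss_sign_fips(rsa_context, LIBSPDM_CRYPTO_NID_SHA256,
                                       digest, 32, sig, &sig_size)) {
            return false;
        }
        return libspdm_rsa_pss_verify_fips(rsa_context, LIBSPDM_CRYPTO_NID_SHA256,
                                           digest, 32, sig, sig_size);
    }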

 #endif /* LIBSPDM_RSA_PSS_SUPPORT */
 #endif /* CRYPTLIB_RSA_H */

@@ -22,6 +22,29 @@
 **/
 extern void *libspdm_sm2_dsa_new_by_nid(size_t nid);

+/**
+ * Generates Shang-Mi2 context from DER-encoded public key data.
+ *
+ * The public key is ASN.1 DER-encoded as RFC7250 describes,
+ * namely, the SubjectPublicKeyInfo structure of a X.509 certificate.
+ *
+ * @param[in] der_data Pointer to the DER-encoded public key data.
+ * @param[in] der_size Size of the DER-encoded public key data in bytes.
+ * @param[out] sm2_context Pointer to newly generated SM2 context which contains the
+ *                         SM2 public key component.
+ *                         Use libspdm_sm2_free() function to free the resource.
+ *
+ * If der_data is NULL, then return false.
+ * If sm2_context is NULL, then return false.
+ *
+ * @retval true SM2 context was generated successfully.
+ * @retval false Invalid DER public key data.
+ *
+ **/
+extern bool libspdm_sm2_get_public_key_from_der(const uint8_t *der_data,
+                                                size_t der_size,
+                                                void **sm2_context);
+
 /**
  * Release the specified sm2 context.
  *

@@ -13,39 +13,6 @@
 #include LIBSPDM_CONFIG
 #endif

-#if defined(LIBSPDM_ENABLE_SET_CERTIFICATE_CAP) && \
-    !defined(LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP)
-#ifdef _MSC_VER
-#pragma message("LIBSPDM_ENABLE_SET_CERTIFICATE_CAP is deprecated. Use " \
-    "LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP instead. This warning will be removed in a " \
-    "future release.")
-#else
-#warning LIBSPDM_ENABLE_SET_CERTIFICATE_CAP is deprecated. Use \
-    LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP instead. This warning will be removed in a \
-    future release.
-#endif /* _MSC_VER */
-#endif /* defined(LIBSPDM_ENABLE_SET_CERTIFICATE_CAP) */
-
-#if defined(LIBSPDM_ENABLE_CHUNK_CAP) && !defined(LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP)
-#ifdef _MSC_VER
-#pragma message("LIBSPDM_ENABLE_CHUNK_CAP is deprecated. Use LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP " \
-    "instead. This warning will be removed in a future release.")
-#else
-#warning LIBSPDM_ENABLE_CHUNK_CAP is deprecated. Use LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP \
-    instead. This warning will be removed in a future release.
-#endif /* _MSC_VER */
-#endif /* defined(LIBSPDM_ENABLE_CHUNK_CAP) */
-
-#if defined(MDEPKG_NDEBUG) && !defined(LIBSPDM_DEBUG_ENABLE)
-#ifdef _MSC_VER
-#pragma message("MDEPKG_NDEBUG is deprecated. Use LIBSPDM_DEBUG_ENABLE " \
-    "instead. This warning will be removed in a future release.")
-#else
-#warning MDEPKG_NDEBUG is deprecated. Use LIBSPDM_DEBUG_ENABLE \
-    instead. This warning will be removed in a future release.
-#endif /* _MSC_VER */
-#endif /* defined(MDEPKG_NDEBUG) */
-
 #if defined(LIBSPDM_DEBUG_ENABLE)
 #undef LIBSPDM_DEBUG_ASSERT_ENABLE
 #undef LIBSPDM_DEBUG_PRINT_ENABLE
@@ -64,6 +31,67 @@
 #define LIBSPDM_DEBUG_BLOCK_ENABLE 0
 #endif /* defined(LIBSPDM_DEBUG_ENABLE) */

+/*when in FIPS mode, only support approved algo in FIPS */
+#if LIBSPDM_FIPS_MODE
+#undef LIBSPDM_SM2_DSA_P256_SUPPORT
+#define LIBSPDM_SM2_DSA_P256_SUPPORT 0
+
+#undef LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT
+#define LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT 0
+
+#undef LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
+#define LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT 0
+
+#undef LIBSPDM_AEAD_SM4_128_GCM_SUPPORT
+#define LIBSPDM_AEAD_SM4_128_GCM_SUPPORT 0
+
+#undef LIBSPDM_SM3_256_SUPPORT
+#define LIBSPDM_SM3_256_SUPPORT 0
+#endif /*LIBSPDM_FIPS_MODE*/
+
+/* define crypto algorithm without parameter */
+#define LIBSPDM_RSA_SSA_SUPPORT ((LIBSPDM_RSA_SSA_2048_SUPPORT) || \
+                                 (LIBSPDM_RSA_SSA_3072_SUPPORT) || \
+                                 (LIBSPDM_RSA_SSA_4096_SUPPORT))
+
+#define LIBSPDM_RSA_PSS_SUPPORT ((LIBSPDM_RSA_PSS_2048_SUPPORT) || \
+                                 (LIBSPDM_RSA_PSS_3072_SUPPORT) || \
+                                 (LIBSPDM_RSA_PSS_4096_SUPPORT))
+
+#define LIBSPDM_ECDSA_SUPPORT ((LIBSPDM_ECDSA_P256_SUPPORT) || \
+                               (LIBSPDM_ECDSA_P384_SUPPORT) || \
+                               (LIBSPDM_ECDSA_P521_SUPPORT))
+
+#define LIBSPDM_SM2_DSA_SUPPORT (LIBSPDM_SM2_DSA_P256_SUPPORT)
+
+#define LIBSPDM_EDDSA_SUPPORT ((LIBSPDM_EDDSA_ED25519_SUPPORT) || \
+                               (LIBSPDM_EDDSA_ED448_SUPPORT))
+
+#define LIBSPDM_FFDHE_SUPPORT ((LIBSPDM_FFDHE_2048_SUPPORT) || \
+                               (LIBSPDM_FFDHE_3072_SUPPORT) || \
+                               (LIBSPDM_FFDHE_4096_SUPPORT))
+
+#define LIBSPDM_ECDHE_SUPPORT ((LIBSPDM_ECDHE_P256_SUPPORT) || \
+                               (LIBSPDM_ECDHE_P384_SUPPORT) || \
+                               (LIBSPDM_ECDHE_P521_SUPPORT))
+
+#define LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT (LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT)
+
+#define LIBSPDM_AEAD_GCM_SUPPORT ((LIBSPDM_AEAD_AES_128_GCM_SUPPORT) || \
+                                  (LIBSPDM_AEAD_AES_256_GCM_SUPPORT))
+
+#define LIBSPDM_AEAD_SM4_SUPPORT (LIBSPDM_AEAD_SM4_128_GCM_SUPPORT)
+
+#define LIBSPDM_SHA2_SUPPORT ((LIBSPDM_SHA256_SUPPORT) || \
+                              (LIBSPDM_SHA384_SUPPORT) || \
+                              (LIBSPDM_SHA512_SUPPORT))
+
+#define LIBSPDM_SHA3_SUPPORT ((LIBSPDM_SHA3_256_SUPPORT) || \
+                              (LIBSPDM_SHA3_384_SUPPORT) || \
+                              (LIBSPDM_SHA3_512_SUPPORT))
+
+#define LIBSPDM_SM3_SUPPORT (LIBSPDM_SM3_256_SUPPORT)
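Because the aggregate macros above are pure ORs of the per-parameter knobs, a build that disables every variant silently compiles a whole family out. A hedged sketch of an explicit guard an integrator could add (not present in the commit):

    #if !(LIBSPDM_RSA_SSA_SUPPORT) && !(LIBSPDM_RSA_PSS_SUPPORT) && \
        !(LIBSPDM_ECDSA_SUPPORT) && !(LIBSPDM_EDDSA_SUPPORT) && \
        !(LIBSPDM_SM2_DSA_SUPPORT)
    #error "No asymmetric signature algorithm family is enabled."
    #endif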

 #if LIBSPDM_CHECK_MACRO
 #include "internal/libspdm_macro_check.h"
 #endif /* LIBSPDM_CHECK_MACRO */

@@ -7,6 +7,11 @@
 #ifndef SPDM_LIB_CONFIG_H
 #define SPDM_LIB_CONFIG_H

+/* Enables FIPS 140-3 mode. */
+#ifndef LIBSPDM_FIPS_MODE
+#define LIBSPDM_FIPS_MODE 0
+#endif
+
 /* Enables assertions and debug printing. When `LIBSPDM_DEBUG_ENABLE` is defined it overrides or
  * sets the values of `LIBSPDM_DEBUG_PRINT_ENABLE`, `LIBSPDM_DEBUG_ASSERT_ENABLE`, and
  * `LIBSPDM_BLOCK_ENABLE` to the value of `LIBSPDM_DEBUG_ENABLE`.
@@ -21,7 +26,7 @@
 /* The SPDM specification allows a Responder to return up to 256 version entries in the `VERSION`
  * response to the Requester, including duplicate entries. For a Requester this value specifies the
  * maximum number of entries that libspdm will tolerate in a `VERSION` response before returning an
- * error. A similiar macro, `SPDM_MAX_VERSION_COUNT`, exists for the Responder. However this macro
+ * error. A similar macro, `SPDM_MAX_VERSION_COUNT`, exists for the Responder. However this macro
  * is not meant to be configured by the Integrator.
  */
 #ifndef LIBSPDM_MAX_VERSION_COUNT
@@ -71,38 +76,6 @@
 #define LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN 1024
 #endif

-#ifndef LIBSPDM_MAX_MESSAGE_BUFFER_SIZE
-#define LIBSPDM_MAX_MESSAGE_BUFFER_SIZE 0x1200
-#endif
-#ifndef LIBSPDM_MAX_MESSAGE_SMALL_BUFFER_SIZE
-#define LIBSPDM_MAX_MESSAGE_SMALL_BUFFER_SIZE 0x100 /* to hold message_a before negotiate*/
-#endif
-#ifndef LIBSPDM_MAX_MESSAGE_MEDIUM_BUFFER_SIZE
-#define LIBSPDM_MAX_MESSAGE_MEDIUM_BUFFER_SIZE 0x300 /* to hold message_k before finished_key is ready*/
-#endif
-
-/* If the Responder replies with a Busy `ERROR` response to a request then the Requester is free to
- * retry sending the request. This value specifies the maximum number of times libspdm will retry
- * sending the request before returning an error. If its value is 0 then libspdm will not send any
- * retry requests.
- */
-#ifndef LIBSPDM_MAX_REQUEST_RETRY_TIMES
-#define LIBSPDM_MAX_REQUEST_RETRY_TIMES 3
-#endif
-#ifndef LIBSPDM_MAX_SESSION_STATE_CALLBACK_NUM
-#define LIBSPDM_MAX_SESSION_STATE_CALLBACK_NUM 4
-#endif
-#ifndef LIBSPDM_MAX_CONNECTION_STATE_CALLBACK_NUM
-#define LIBSPDM_MAX_CONNECTION_STATE_CALLBACK_NUM 4
-#endif
-#ifndef LIBSPDM_MAX_KEY_UPDATE_CALLBACK_NUM
-#define LIBSPDM_MAX_KEY_UPDATE_CALLBACK_NUM 4
-#endif
-
-#ifndef LIBSPDM_MAX_CSR_SIZE
-#define LIBSPDM_MAX_CSR_SIZE 0x1000
-#endif
-
 /* To ensure integrity in communication between the Requester and the Responder libspdm calculates
  * cryptographic digests and signatures over multiple requests and responses. This value specifies
  * whether libspdm will use a running calculation over the transcript, where requests and responses
@@ -113,23 +86,44 @@
 #define LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT 0
 #endif


 /* Cryptography Configuration
  * In each category, at least one should be selected.
  * NOTE: Not all combination can be supported. E.g. Don't mix NIST algo with SMx.*/

-#ifndef LIBSPDM_RSA_SSA_SUPPORT
-#define LIBSPDM_RSA_SSA_SUPPORT 1
+#ifndef LIBSPDM_RSA_SSA_2048_SUPPORT
+#define LIBSPDM_RSA_SSA_2048_SUPPORT 1
 #endif
-#ifndef LIBSPDM_RSA_PSS_SUPPORT
-#define LIBSPDM_RSA_PSS_SUPPORT 1
+#ifndef LIBSPDM_RSA_SSA_3072_SUPPORT
+#define LIBSPDM_RSA_SSA_3072_SUPPORT 1
 #endif
-#ifndef LIBSPDM_ECDSA_SUPPORT
-#define LIBSPDM_ECDSA_SUPPORT 1
+#ifndef LIBSPDM_RSA_SSA_4096_SUPPORT
+#define LIBSPDM_RSA_SSA_4096_SUPPORT 1
 #endif
-#ifndef LIBSPDM_SM2_DSA_SUPPORT
-#define LIBSPDM_SM2_DSA_SUPPORT 1
+
+#ifndef LIBSPDM_RSA_PSS_2048_SUPPORT
+#define LIBSPDM_RSA_PSS_2048_SUPPORT 1
 #endif
+#ifndef LIBSPDM_RSA_PSS_3072_SUPPORT
+#define LIBSPDM_RSA_PSS_3072_SUPPORT 1
+#endif
+#ifndef LIBSPDM_RSA_PSS_4096_SUPPORT
+#define LIBSPDM_RSA_PSS_4096_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_ECDSA_P256_SUPPORT
+#define LIBSPDM_ECDSA_P256_SUPPORT 1
+#endif
+#ifndef LIBSPDM_ECDSA_P384_SUPPORT
+#define LIBSPDM_ECDSA_P384_SUPPORT 1
+#endif
+#ifndef LIBSPDM_ECDSA_P521_SUPPORT
+#define LIBSPDM_ECDSA_P521_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_SM2_DSA_P256_SUPPORT
+#define LIBSPDM_SM2_DSA_P256_SUPPORT 1
+#endif

 #ifndef LIBSPDM_EDDSA_ED25519_SUPPORT
 #define LIBSPDM_EDDSA_ED25519_SUPPORT 1
 #endif
@@ -137,24 +131,43 @@
 #define LIBSPDM_EDDSA_ED448_SUPPORT 1
 #endif

-#ifndef LIBSPDM_FFDHE_SUPPORT
-#define LIBSPDM_FFDHE_SUPPORT 1
+#ifndef LIBSPDM_FFDHE_2048_SUPPORT
+#define LIBSPDM_FFDHE_2048_SUPPORT 1
 #endif
-#ifndef LIBSPDM_ECDHE_SUPPORT
-#define LIBSPDM_ECDHE_SUPPORT 1
+#ifndef LIBSPDM_FFDHE_3072_SUPPORT
+#define LIBSPDM_FFDHE_3072_SUPPORT 1
 #endif
-#ifndef LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT
-#define LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT 1
+#ifndef LIBSPDM_FFDHE_4096_SUPPORT
+#define LIBSPDM_FFDHE_4096_SUPPORT 1
 #endif

-#ifndef LIBSPDM_AEAD_GCM_SUPPORT
-#define LIBSPDM_AEAD_GCM_SUPPORT 1
+#ifndef LIBSPDM_ECDHE_P256_SUPPORT
+#define LIBSPDM_ECDHE_P256_SUPPORT 1
 #endif
+#ifndef LIBSPDM_ECDHE_P384_SUPPORT
+#define LIBSPDM_ECDHE_P384_SUPPORT 1
+#endif
+#ifndef LIBSPDM_ECDHE_P521_SUPPORT
+#define LIBSPDM_ECDHE_P521_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT
+#define LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_AEAD_AES_128_GCM_SUPPORT
+#define LIBSPDM_AEAD_AES_128_GCM_SUPPORT 1
+#endif
+#ifndef LIBSPDM_AEAD_AES_256_GCM_SUPPORT
+#define LIBSPDM_AEAD_AES_256_GCM_SUPPORT 1
+#endif

 #ifndef LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
 #define LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT 1
 #endif
-#ifndef LIBSPDM_AEAD_SM4_SUPPORT
-#define LIBSPDM_AEAD_SM4_SUPPORT 1
+
+#ifndef LIBSPDM_AEAD_SM4_128_GCM_SUPPORT
+#define LIBSPDM_AEAD_SM4_128_GCM_SUPPORT 1
 #endif

 #ifndef LIBSPDM_SHA256_SUPPORT
@@ -166,6 +179,7 @@
 #ifndef LIBSPDM_SHA512_SUPPORT
 #define LIBSPDM_SHA512_SUPPORT 1
 #endif
+
 #ifndef LIBSPDM_SHA3_256_SUPPORT
 #define LIBSPDM_SHA3_256_SUPPORT 1
 #endif
@@ -175,10 +189,16 @@
 #ifndef LIBSPDM_SHA3_512_SUPPORT
 #define LIBSPDM_SHA3_512_SUPPORT 1
 #endif

 #ifndef LIBSPDM_SM3_256_SUPPORT
 #define LIBSPDM_SM3_256_SUPPORT 1
 #endif

 /* This can be set to 0 for the device which does not need X509 parser.*/
 #ifndef LIBSPDM_CERT_PARSE_SUPPORT
 #define LIBSPDM_CERT_PARSE_SUPPORT 1
 #endif

 /* Code space optimization for Optional request/response messages.*/

 /* Consumers of libspdm may wish to not fully implement all of the optional
@@ -192,17 +212,17 @@

 /* LIBSPDM_ENABLE_CAPABILITY_CERT_CAP - Enable/Disable single CERT capability.
  * LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP - Enable/Disable single CHAL capability.
- * LIBSPDM_ENABLE_CAPABILTIY_MEAS_CAP - Enable/Disables multiple MEAS capabilities:
+ * LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP - Enable/Disables multiple MEAS capabilities:
  *                                      (MEAS_CAP_NO_SIG, MEAS_CAP_SIG, MEAS_FRESH_CAP)*/

 /* LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP - Enable/Disable single Key Exchange capability.
- * LIBSPDM_ENABLE_CAPABILITY_PSK_EX_CAP - Enable/Disable PSK_EX and PSK_FINISH.*/
+ * LIBSPDM_ENABLE_CAPABILITY_PSK_CAP - Enable/Disable PSK_EX and PSK_FINISH.*/

 /* LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP - Enable/Disable mutual authentication.
  * LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP - Enable/Disable encapsulated message.*/

-/* LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP - Enable/Disable get csr capability.
- * LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP - Enable/Disable set certificate capability. */
+/* LIBSPDM_ENABLE_CAPABILITY_CSR_CAP - Enable/Disable get csr capability.
+ * LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP - Enable/Disable set certificate capability. */

 #ifndef LIBSPDM_ENABLE_CAPABILITY_CERT_CAP
 #define LIBSPDM_ENABLE_CAPABILITY_CERT_CAP 1
@@ -220,8 +240,8 @@
 #define LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP 1
 #endif

-#ifndef LIBSPDM_ENABLE_CAPABILITY_PSK_EX_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_PSK_EX_CAP 1
+#ifndef LIBSPDM_ENABLE_CAPABILITY_PSK_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_PSK_CAP 1
 #endif

 #ifndef LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP
@@ -236,18 +256,51 @@
 #define LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP 1
 #endif

-#ifndef LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP 1
+#ifndef LIBSPDM_ENABLE_CAPABILITY_CSR_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_CSR_CAP 1
 #endif

-#ifndef LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP 1
+#ifndef LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP 1
 #endif

 #ifndef LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
 #define LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP 1
 #endif

+/* If 1 then endpoint supports sending GET_CERTIFICATE and GET_DIGESTS requests.
+ * If enabled and endpoint is a Responder then LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
+ * must also be enabled.
+ */
+#ifndef LIBSPDM_SEND_GET_CERTIFICATE_SUPPORT
+#define LIBSPDM_SEND_GET_CERTIFICATE_SUPPORT 1
+#endif
+
+/* If 1 then endpoint supports sending CHALLENGE request.
+ * If enabled and endpoint is a Responder then LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
+ * must also be enabled.
+ */
+#ifndef LIBSPDM_SEND_CHALLENGE_SUPPORT
+#define LIBSPDM_SEND_CHALLENGE_SUPPORT 1
+#endif
+
+/* When LIBSPDM_RESPOND_IF_READY_SUPPORT is 0 then
+ * - For a Requester, if the Responder sends a ResponseNotReady ERROR response then the error
+ *   is immediately returned to the Integrator. The Requester cannot send a RESPOND_IF_READY
+ *   request.
+ * - For a Responder, it cannot send a RESPOND_IF_READY ERROR response and does not support
+ *   RESPOND_IF_READY.
+ * When LIBSPDM_RESPOND_IF_READY_SUPPORT is 1 then
+ * - For a Requester, if the Responder sends a ResponseNotReady ERROR response then libspdm
+ *   waits an amount of time, as specified by the RDTExponent parameter, before sending
+ *   RESPOND_IF_READY.
+ * - For a Responder, if its response state is NOT_READY then it will send a ResponseNotReady
+ *   ERROR response to the Requester, and will accept a subsequent RESPOND_IF_READY request.
+ */
+#ifndef LIBSPDM_RESPOND_IF_READY_SUPPORT
+#define LIBSPDM_RESPOND_IF_READY_SUPPORT 1
+#endif
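Every knob above is wrapped in #ifndef, so an Integrator can pre-define it in the header named by LIBSPDM_CONFIG (see the #include LIBSPDM_CONFIG line earlier in this diff). A hypothetical override header, with an invented file name and illustrative choices:

    /* my_spdm_config.h -- passed as -DLIBSPDM_CONFIG='"my_spdm_config.h"' */
    #ifndef MY_SPDM_CONFIG_H
    #define MY_SPDM_CONFIG_H

    #define LIBSPDM_ENABLE_CAPABILITY_CSR_CAP      0  /* no GET_CSR flow       */
    #define LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP 0  /* no SET_CERTIFICATE    */
    #define LIBSPDM_RESPOND_IF_READY_SUPPORT       0  /* fail fast on NotReady */

    #endif /* MY_SPDM_CONFIG_H */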

 /*
  * MinDataTransferSize = 42
  *
@@ -290,7 +343,7 @@
  * | CHALLENGE 1.2            | 40                                       | 1       |
  * | CHALLENGE_AUTH 1.2       | 38 + H * 2 + S [+ O] = [166, 678]        | [6, 23] |
  * +--------------------------+------------------------------------------+---------+
- * | GET_MEASUREMENTS 1.2     | 5 + Nounce (0 or 32)                     | 1       |
+ * | GET_MEASUREMENTS 1.2     | 5 + Nonce (0 or 32)                      | 1       |
  * | MEASUREMENTS 1.2         | 42 + MeasRecLen (+ S) [+ O] = [106, 554] | [4, 19] |
  * +--------------------------+------------------------------------------+---------+
  * | KEY_EXCHANGE 1.2         | 42 + D [+ O] = [106, 554]                | [4, 19] |
@@ -313,93 +366,6 @@
  * +==========================+==========================================+=========+
  */

-/* Maximum size of a large SPDM message.
- * If chunk is unsupported, it must be same as LIBSPDM_DATA_TRANSFER_SIZE.
- * If chunk is supported, it must be larger than LIBSPDM_DATA_TRANSFER_SIZE.
- * It matches MaxSPDMmsgSize in SPDM specification. */
-#ifndef LIBSPDM_MAX_SPDM_MSG_SIZE
-#define LIBSPDM_MAX_SPDM_MSG_SIZE LIBSPDM_MAX_MESSAGE_BUFFER_SIZE
-#endif
-
-/* Maximum size of a single SPDM message.
- * It matches DataTransferSize in SPDM specification. */
-#ifndef LIBSPDM_DATA_TRANSFER_SIZE
-#define LIBSPDM_DATA_TRANSFER_SIZE LIBSPDM_MAX_MESSAGE_BUFFER_SIZE
-#endif
-
-/* Required sender/receive buffer in device io.
- * NOTE: This is transport specific. Below configuration is just an example.
- * +-------+--------+---------------------------+------+--+------+---+--------+-----+
- * | TYPE  |TransHdr|      EncryptionHeader     |AppHdr|  |Random|MAC|AlignPad|FINAL|
- * |       |        |SessionId|SeqNum|Len|AppLen|      |  |      |   |        |     |
- * +-------+--------+---------------------------+------+  +------+---+--------+-----+
- * | MCTP  |    1   |    4    |   2  | 2 |   2  |   1  |  |  32  | 12|    0   |  56 |
- * |PCI_DOE|    8   |    4    |   0  | 2 |   2  |   0  |  |   0  | 12|    3   |  31 |
- * +-------+--------+---------------------------+------+--+------+---+--------+-----+
- */
-#ifndef LIBSPDM_TRANSPORT_ADDITIONAL_SIZE
-#define LIBSPDM_TRANSPORT_ADDITIONAL_SIZE 64
-#endif
-#ifndef LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE
-#define LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE (LIBSPDM_DATA_TRANSFER_SIZE + \
-                                            LIBSPDM_TRANSPORT_ADDITIONAL_SIZE)
-#endif
-
-/* Required scratch buffer size for libspdm internal usage.
- * It may be used to hold the encrypted/decrypted message and/or last sent/received message.
- * It may be used to hold the large request/response and intermediate send/receive buffer
- * in case of chunking.
- *
- * If chunking is not supported, it may be just LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE.
- * If chunking is supported, it should be at least below.
- *
- * +---------------+--------------+--------------------------+------------------------------+
- * |SECURE_MESSAGE |LARGE_MESSAGE |     SENDER_RECEIVER      |    LARGE_SENDER_RECEIVER     |
- * +---------------+--------------+--------------------------+------------------------------+
- * |<-Secure msg ->|<-Large msg ->|<-Snd/Rcv buf for chunk ->|<-Snd/Rcv buf for large msg ->|
- *
- * The value is NOT configurable.
- * The value MAY be changed in different libspdm version.
- * It is exposed here, just in case the libspdm consumer wants to configure the setting at build time.
- */
-#if LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
-
-/* first section */
-#define LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_OFFSET 0
-#define LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)
-
-/* second section */
-#define LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_OFFSET (LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY)
-#define LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)
-
-/* third section */
-#define LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_OFFSET \
-    (LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY + \
-     LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY)
-#define LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)
-
-/* fourth section */
-#define LIBSPDM_SCRATCH_BUFFER_LARGE_SENDER_RECEIVER_OFFSET \
-    (LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY + \
-     LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY + \
-     LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_CAPACITY)
-#define LIBSPDM_SCRATCH_BUFFER_LARGE_SENDER_RECEIVER_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)
-
-#define LIBSPDM_SCRATCH_BUFFER_SIZE (LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY + \
-                                     LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY + \
-                                     LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_CAPACITY + \
-                                     LIBSPDM_SCRATCH_BUFFER_LARGE_SENDER_RECEIVER_CAPACITY \
-                                     )
-
-#else
-#define LIBSPDM_SCRATCH_BUFFER_SIZE (LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE)
-#endif
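With chunking enabled, the (removed) layout above is four equal sections of LIBSPDM_MAX_SPDM_MSG_SIZE bytes each; worked through with the former default of 0x1200 (illustrative arithmetic only):

    /* secure message:         offset 0x0000, capacity 0x1200          */
    /* large message:          offset 0x1200, capacity 0x1200          */
    /* sender/receiver:        offset 0x2400, capacity 0x1200          */
    /* large sender/receiver:  offset 0x3600, capacity 0x1200          */
    /* LIBSPDM_SCRATCH_BUFFER_SIZE = 4 * 0x1200 = 0x4800 (18 KiB)      */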

 /* Enable message logging.
  * See https://github.com/DMTF/libspdm/blob/main/doc/user_guide.md#message-logging
  * for more information */
@@ -412,4 +378,9 @@
 #define LIBSPDM_CHECK_MACRO 0
 #endif

+/* Enable checks to the SPDM context during runtime. */
+#ifndef LIBSPDM_CHECK_SPDM_CONTEXT
+#define LIBSPDM_CHECK_SPDM_CONTEXT 1
+#endif
+
 #endif /* SPDM_LIB_CONFIG_H */

@@ -23,10 +23,16 @@

 #include "internal_crypt_lib.h"

+#ifdef USE_LKCA
+#ifndef NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT
+#include <crypto/internal/hash.h>
+#endif
+#endif
+
 void *lkca_hash_new(const char* alg_name)
 {
 #ifndef USE_LKCA
-    return false;
+    return NULL;
 #else
     //XXX: can we reuse crypto_shash part and just allocate desc
     struct crypto_shash *alg;
@@ -87,9 +93,24 @@ bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src)

     struct crypto_shash *src_tfm = src->tfm;
     struct crypto_shash *dst_tfm = dst->tfm;
+    int ss = crypto_shash_statesize(dst_tfm);

+#ifdef NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT
     char *src_ipad = crypto_tfm_ctx_aligned(&src_tfm->base);
     char *dst_ipad = crypto_tfm_ctx_aligned(&dst_tfm->base);
-    int ss = crypto_shash_statesize(dst_tfm);
+#else
+    int ctx_size = crypto_shash_alg(dst_tfm)->base.cra_ctxsize;
+    char *src_ipad = crypto_shash_ctx(src_tfm);
+    char *dst_ipad = crypto_shash_ctx(dst_tfm);
+    /*
+     * Actual struct definition is hidden, so I assume data we need is at
+     * the end. In 6.0 the struct has a pointer to crpyto_shash followed by:
+     * 'u8 ipad[statesize];', then 'u8 opad[statesize];'
+     */
+    src_ipad += ctx_size - 2 * ss;
+    dst_ipad += ctx_size - 2 * ss;
+#endif

     memcpy(dst_ipad, src_ipad, crypto_shash_blocksize(src->tfm));
     memcpy(dst_ipad + ss, src_ipad + ss, crypto_shash_blocksize(src->tfm));
     crypto_shash_clear_flags(dst->tfm, CRYPTO_TFM_NEED_KEY);

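A sketch of the context layout the #else branch above assumes (hypothetical; the kernel hides the real struct definition):

    /* Assumed shape of the kernel's HMAC shash context, ~kernel 6.0: */
    struct assumed_hmac_ctx {
        struct crypto_shash *hash;  /* leading pointer                      */
        /* ...allocator padding...                                          */
        /* u8 ipad[statesize];  starts at cra_ctxsize - 2 * statesize       */
        /* u8 opad[statesize];  starts at cra_ctxsize - 1 * statesize       */
    };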
@@ -64,6 +64,7 @@ static nv_cap_table_entry_t g_nv_cap_mig_table[] =

 static nv_cap_table_entry_t g_nv_cap_sys_table[] =
 {
+    {"/driver/nvidia/capabilities/fabric-imex-mgmt"}
 };

 #define NV_CAP_MIG_CI_ENTRIES(_gi) \

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 1999-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -447,7 +447,7 @@ NV_STATUS NV_API_CALL nv_dma_map_sgt(
         return NV_ERR_NOT_SUPPORTED;
     }

-    if (page_count > os_get_num_phys_pages())
+    if (page_count > NV_NUM_PHYSPAGES)
     {
         NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev,
                           "DMA mapping request too large!\n");
@@ -509,7 +509,7 @@ NV_STATUS NV_API_CALL nv_dma_unmap_sgt(
     return NV_OK;
 }

-NV_STATUS NV_API_CALL nv_dma_map_pages(
+static NV_STATUS NV_API_CALL nv_dma_map_pages(
     nv_dma_device_t *dma_dev,
     NvU64 page_count,
     NvU64 *va_array,
@@ -530,7 +530,7 @@ NV_STATUS NV_API_CALL nv_dma_map_pages(
         return NV_ERR_NOT_SUPPORTED;
     }

-    if (page_count > os_get_num_phys_pages())
+    if (page_count > NV_NUM_PHYSPAGES)
     {
         NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev,
                           "DMA mapping request too large!\n");
@@ -582,7 +582,7 @@ NV_STATUS NV_API_CALL nv_dma_map_pages(
     return status;
 }

-NV_STATUS NV_API_CALL nv_dma_unmap_pages(
+static NV_STATUS NV_API_CALL nv_dma_unmap_pages(
     nv_dma_device_t *dma_dev,
     NvU64 page_count,
     NvU64 *va_array,
@@ -602,7 +602,7 @@ NV_STATUS NV_API_CALL nv_dma_unmap_pages(

     dma_map = *priv;

-    if (page_count > os_get_num_phys_pages())
+    if (page_count > NV_NUM_PHYSPAGES)
     {
         NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev,
                           "DMA unmapping request too large!\n");
@@ -1100,7 +1100,6 @@ NV_STATUS NV_API_CALL nv_dma_import_dma_buf
     nv_dma_device_t *dma_dev,
     struct dma_buf *dma_buf,
     NvU32 *size,
-    void **user_pages,
     struct sg_table **sgt,
     nv_dma_buf_t **import_priv
 )
@@ -1113,7 +1112,6 @@ NV_STATUS NV_API_CALL nv_dma_import_from_fd
     nv_dma_device_t *dma_dev,
     NvS32 fd,
     NvU32 *size,
-    void **user_pages,
     struct sg_table **sgt,
     nv_dma_buf_t **import_priv
 )
@@ -1123,7 +1121,6 @@ NV_STATUS NV_API_CALL nv_dma_import_from_fd

 void NV_API_CALL nv_dma_release_dma_buf
 (
-    void *user_pages,
     nv_dma_buf_t *import_priv
 )
 {

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2017-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -376,7 +376,7 @@ void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 cpu_virtual,

     cbsize = nvl->npu->numa_info.l1d_cache_block_size;

-    CACHE_FLUSH();
+    asm volatile("sync; isync" ::: "memory");

     /* Force eviction of any cache lines from the NUMA-onlined region. */
     for (offset = 0; offset < size; offset += cbsize)
@@ -387,7 +387,7 @@ void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 cpu_virtual,
         cond_resched();
     }

-    CACHE_FLUSH();
+    asm volatile("sync; isync" ::: "memory");
 }

 #else

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -705,9 +705,9 @@ int nvidia_mmap(
     struct vm_area_struct *vma
 )
 {
-    nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file);
-    nv_state_t *nv = NV_STATE_PTR(nvl);
+    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
+    nv_linux_state_t *nvl;
+    nv_state_t *nv;
     nvidia_stack_t *sp = NULL;
     int status;

@@ -720,6 +720,19 @@ int nvidia_mmap(
         return -EINVAL;
     }

+    if (!nv_is_control_device(NV_FILE_INODE(file)))
+    {
+        status = nv_wait_open_complete_interruptible(nvlfp);
+        if (status != 0)
+            return status;
+    }
+
+    nvl = nvlfp->nvptr;
+    if (nvl == NULL)
+        return -EIO;
+
+    nv = NV_STATE_PTR(nvl);
+
     status = nv_kmem_cache_alloc_stack(&sp);
     if (status != 0)
     {

@@ -171,7 +171,6 @@ static void nv_p2p_free_dma_mapping(
     nv_dma_device_t peer_dma_dev = {{ 0 }};
    NvU32 page_size;
     NV_STATUS status;
-    NvU32 i;

     peer_dma_dev.dev = &dma_mapping->pci_dev->dev;
     peer_dma_dev.addressable_range.limit = dma_mapping->pci_dev->dma_mask;
@@ -180,16 +179,64 @@ static void nv_p2p_free_dma_mapping(

     if (dma_mapping->private != NULL)
     {
-        WARN_ON(page_size != PAGE_SIZE);
+        /*
+         * If OS page size is smaller than P2P page size,
+         * page inflation logic applies for DMA unmapping too.
+         * Bigger P2P page needs to be split in smaller OS pages.
+         */
+        if (page_size > PAGE_SIZE)
+        {
+            NvU64 *os_dma_addresses = NULL;
+            NvU32 os_pages_per_p2p_page = page_size;
+            NvU32 os_page_count;
+            NvU32 index, i, j;

-        status = nv_dma_unmap_alloc(&peer_dma_dev,
-                                    dma_mapping->entries,
-                                    dma_mapping->dma_addresses,
-                                    &dma_mapping->private);
-        WARN_ON(status != NV_OK);
+            do_div(os_pages_per_p2p_page, PAGE_SIZE);
+
+            os_page_count = os_pages_per_p2p_page * dma_mapping->entries;
+
+            status = os_alloc_mem((void **)&os_dma_addresses,
+                                  (os_page_count * sizeof(NvU64)));
+            if(WARN_ON(status != NV_OK))
+            {
+                goto failed;
+            }
+
+            index = 0;
+            for (i = 0; i < dma_mapping->entries; i++)
+            {
+                os_dma_addresses[index] = dma_mapping->dma_addresses[i];
+                index++;
+
+                for (j = 1; j < os_pages_per_p2p_page; j++)
+                {
+                    os_dma_addresses[index] = os_dma_addresses[index - 1] + PAGE_SIZE;
+                    index++;
+                }
+            }
+
+            status = nv_dma_unmap_alloc(&peer_dma_dev,
+                                        os_page_count,
+                                        os_dma_addresses,
+                                        &dma_mapping->private);
+            WARN_ON(status != NV_OK);
+
+            os_free_mem(os_dma_addresses);
+        }
+        else
+        {
+            WARN_ON(page_size != PAGE_SIZE);
+
+            status = nv_dma_unmap_alloc(&peer_dma_dev,
+                                        dma_mapping->entries,
+                                        dma_mapping->dma_addresses,
+                                        &dma_mapping->private);
+            WARN_ON(status != NV_OK);
+        }
     }
     else
     {
+        NvU32 i;
         for (i = 0; i < dma_mapping->entries; i++)
         {
             nv_dma_unmap_peer(&peer_dma_dev, page_size / PAGE_SIZE,
@@ -197,6 +244,7 @@ static void nv_p2p_free_dma_mapping(
         }
     }

+failed:
     os_free_mem(dma_mapping->dma_addresses);

     os_free_mem(dma_mapping);
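The page-inflation arithmetic above, worked for a 64 KiB P2P page on a 4 KiB-page kernel (illustrative numbers only):

    /* os_pages_per_p2p_page = 65536 / 4096 = 16                        */
    /* entries = 8 P2P pages  ->  os_page_count = 16 * 8 = 128          */
    /* each P2P address expands to itself plus 15 successor addresses,  */
    /* each PAGE_SIZE (4096) bytes beyond the previous one              */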
@@ -316,14 +364,14 @@ int nvidia_p2p_init_mapping(
return -ENOTSUPP;
}

EXPORT_SYMBOL(nvidia_p2p_init_mapping);
NV_EXPORT_SYMBOL(nvidia_p2p_init_mapping);

int nvidia_p2p_destroy_mapping(uint64_t p2p_token)
{
return -ENOTSUPP;
}

EXPORT_SYMBOL(nvidia_p2p_destroy_mapping);
NV_EXPORT_SYMBOL(nvidia_p2p_destroy_mapping);

static void nv_p2p_mem_info_free_callback(void *data)
{
@@ -587,7 +635,7 @@ int nvidia_p2p_get_pages(
p2p_token, va_space, virtual_address,
length, page_table, free_callback, data);
}
EXPORT_SYMBOL(nvidia_p2p_get_pages);
NV_EXPORT_SYMBOL(nvidia_p2p_get_pages);

int nvidia_p2p_get_pages_persistent(
uint64_t virtual_address,
@@ -605,7 +653,7 @@ int nvidia_p2p_get_pages_persistent(
virtual_address, length, page_table,
NULL, NULL);
}
EXPORT_SYMBOL(nvidia_p2p_get_pages_persistent);
NV_EXPORT_SYMBOL(nvidia_p2p_get_pages_persistent);

/*
* This function is a no-op, but is left in place (for now), in order to allow
@@ -618,7 +666,7 @@ int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table)
return 0;
}

EXPORT_SYMBOL(nvidia_p2p_free_page_table);
NV_EXPORT_SYMBOL(nvidia_p2p_free_page_table);

int nvidia_p2p_put_pages(
uint64_t p2p_token,
@@ -649,7 +697,7 @@ int nvidia_p2p_put_pages(

return nvidia_p2p_map_status(status);
}
EXPORT_SYMBOL(nvidia_p2p_put_pages);
NV_EXPORT_SYMBOL(nvidia_p2p_put_pages);

int nvidia_p2p_put_pages_persistent(
uint64_t virtual_address,
@@ -689,7 +737,7 @@ int nvidia_p2p_put_pages_persistent(

return nvidia_p2p_map_status(status);
}
EXPORT_SYMBOL(nvidia_p2p_put_pages_persistent);
NV_EXPORT_SYMBOL(nvidia_p2p_put_pages_persistent);

int nvidia_p2p_dma_map_pages(
struct pci_dev *peer,
@@ -804,7 +852,7 @@ failed:
return nvidia_p2p_map_status(status);
}

EXPORT_SYMBOL(nvidia_p2p_dma_map_pages);
NV_EXPORT_SYMBOL(nvidia_p2p_dma_map_pages);

int nvidia_p2p_dma_unmap_pages(
struct pci_dev *peer,
@@ -844,7 +892,7 @@ int nvidia_p2p_dma_unmap_pages(
return 0;
}

EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages);
NV_EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages);

/*
* This function is a no-op, but is left in place (for now), in order to allow
@@ -859,7 +907,7 @@ int nvidia_p2p_free_dma_mapping(
return 0;
}

EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping);
NV_EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping);

int nvidia_p2p_register_rsync_driver(
nvidia_p2p_rsync_driver_t *driver,
@@ -888,7 +936,7 @@ int nvidia_p2p_register_rsync_driver(
driver->wait_for_rsync, data);
}

EXPORT_SYMBOL(nvidia_p2p_register_rsync_driver);
NV_EXPORT_SYMBOL(nvidia_p2p_register_rsync_driver);

void nvidia_p2p_unregister_rsync_driver(
nvidia_p2p_rsync_driver_t *driver,
@@ -920,7 +968,7 @@ void nvidia_p2p_unregister_rsync_driver(
driver->wait_for_rsync, data);
}

EXPORT_SYMBOL(nvidia_p2p_unregister_rsync_driver);
NV_EXPORT_SYMBOL(nvidia_p2p_unregister_rsync_driver);

int nvidia_p2p_get_rsync_registers(
nvidia_p2p_rsync_reg_info_t **reg_info
@@ -1013,7 +1061,7 @@ int nvidia_p2p_get_rsync_registers(
return 0;
}

EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers);
NV_EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers);

void nvidia_p2p_put_rsync_registers(
nvidia_p2p_rsync_reg_info_t *reg_info
@@ -1045,4 +1093,4 @@ void nvidia_p2p_put_rsync_registers(
os_free_mem(reg_info);
}

EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers);
NV_EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers);

@@ -25,6 +25,15 @@
#include <linux/module.h>

#include "nv-pci-table.h"
#include "cpuopsys.h"

#if defined(NV_BSD)
/* Define PCI classes that FreeBSD's linuxkpi is missing */
#define PCI_VENDOR_ID_NVIDIA 0x10de
#define PCI_CLASS_DISPLAY_VGA 0x0300
#define PCI_CLASS_DISPLAY_3D 0x0302
#define PCI_CLASS_BRIDGE_OTHER 0x0680
#endif

/* Devices supported by RM */
struct pci_device_id nv_pci_table[] = {
@@ -48,7 +57,7 @@ struct pci_device_id nv_pci_table[] = {
};

/* Devices supported by all drivers in nvidia.ko */
struct pci_device_id nv_module_device_table[] = {
struct pci_device_id nv_module_device_table[4] = {
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
@@ -76,4 +85,6 @@ struct pci_device_id nv_module_device_table[] = {
{ }
};

#if defined(NV_LINUX)
MODULE_DEVICE_TABLE(pci, nv_module_device_table);
#endif

@@ -27,5 +27,6 @@
#include <linux/pci.h>

extern struct pci_device_id nv_pci_table[];
extern struct pci_device_id nv_module_device_table[4];

#endif /* _NV_PCI_TABLE_H_ */

@@ -280,6 +280,78 @@ resize:
#endif /* NV_PCI_REBAR_GET_POSSIBLE_SIZES_PRESENT */
}

#if defined(NV_DEVICE_PROPERTY_READ_U64_PRESENT) && \
defined(CONFIG_ACPI_NUMA) && \
NV_IS_EXPORT_SYMBOL_PRESENT_pxm_to_node
/*
* Parse the SRAT table to look for the NUMA node associated with the GPU.
*
* find_gpu_numa_nodes_in_srat() is strongly associated with
* nv_init_coherent_link_info(). Hence the matching wrapping conditions.
*/
static NvU32 find_gpu_numa_nodes_in_srat(nv_linux_state_t *nvl)
{
NvU32 gi_dbdf, dev_dbdf, pxm_count = 0;
struct acpi_table_header *table_header;
struct acpi_subtable_header *subtable_header;
unsigned long table_end, subtable_header_length;
struct acpi_srat_generic_affinity *gi;
NvU32 numa_node = NUMA_NO_NODE;

if (acpi_get_table(ACPI_SIG_SRAT, 0, &table_header)) {
nv_printf(NV_DBG_INFO, "NVRM: Failed to parse the SRAT table.\n");
return 0;
}

table_end = (unsigned long)table_header + table_header->length;
subtable_header = (struct acpi_subtable_header *)
((unsigned long)table_header + sizeof(struct acpi_table_srat));
subtable_header_length = subtable_header->length;

dev_dbdf = NV_PCI_DOMAIN_NUMBER(nvl->pci_dev) << 16 |
NV_PCI_BUS_NUMBER(nvl->pci_dev) << 8 |
NV_PCI_DEVFN(nvl->pci_dev);

/*
* On baremetal and passthrough, there can be up to 8 generic initiators.
* This is not a hack: a device can have as many generic initiators as the
* hardware supports.
*/
while (subtable_header_length &&
(((unsigned long)subtable_header) + subtable_header_length < table_end)) {

if (subtable_header->type == ACPI_SRAT_TYPE_GENERIC_AFFINITY) {
gi = (struct acpi_srat_generic_affinity *) subtable_header;
gi_dbdf = *((NvU16 *)(&gi->device_handle[0])) << 16 |
*((NvU16 *)(&gi->device_handle[2]));

if (gi_dbdf == dev_dbdf) {
numa_node = pxm_to_node(gi->proximity_domain);
if (numa_node < MAX_NUMNODES) {
pxm_count++;
set_bit(numa_node, nvl->coherent_link_info.free_node_bitmap);
}
else {
/* We shouldn't be here. This is a misconfiguration. */
nv_printf(NV_DBG_INFO, "NVRM: Invalid node-id found.\n");
pxm_count = 0;
goto exit;
}
}
}

subtable_header = (struct acpi_subtable_header *)
((unsigned long) subtable_header + subtable_header_length);
subtable_header_length = subtable_header->length;
}

exit:
acpi_put_table(table_header);
return pxm_count;
}

#endif

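The dev_dbdf/gi_dbdf comparison above packs a PCI segment, bus, and devfn into one 32-bit value. A small sketch of that encoding under the same assumptions (pack_dbdf is an illustrative helper, not a driver or ACPI API):

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_dbdf(uint16_t domain, uint8_t bus, uint8_t devfn)
{
    return ((uint32_t)domain << 16) | ((uint32_t)bus << 8) | devfn;
}

int main(void)
{
    /* 0000:65:00.0 -> devfn = (device << 3) | function */
    printf("0x%08x\n", pack_dbdf(0x0000, 0x65, (0x00 << 3) | 0x0));
    return 0;
}
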
static void
nv_init_coherent_link_info
(
@@ -294,16 +366,28 @@ nv_init_coherent_link_info
NvU64 pxm_start = 0;
NvU64 pxm_count = 0;
NvU32 pxm;
NvU32 gi_found = 0, node;

if (!NVCPU_IS_AARCH64)
return;

if (device_property_read_u64(nvl->dev, "nvidia,gpu-mem-pxm-start", &pxm_start) != 0)
if (!dev_is_pci(nvl->dev))
return;

gi_found = find_gpu_numa_nodes_in_srat(nvl);
if (!gi_found &&
(device_property_read_u64(nvl->dev, "nvidia,gpu-mem-pxm-start", &pxm_start) != 0 ||
device_property_read_u64(nvl->dev, "nvidia,gpu-mem-pxm-count", &pxm_count) != 0))
goto failed;
if (device_property_read_u64(nvl->dev, "nvidia,gpu-mem-pxm-count", &pxm_count) != 0)
goto failed;
if (device_property_read_u64(nvl->dev, "nvidia,gpu-mem-base-pa", &pa) != 0)

if (device_property_read_u64(nvl->dev, "nvidia,gpu-mem-base-pa", &pa) == 0)
{
nvl->coherent_link_info.gpu_mem_pa = pa;
}
else
{
unsigned int gpu_bar1_offset, gpu_bar2_offset;

/*
* This implies that the DSD key for PXM start and count is present
* while the one for Physical Address (PA) is absent.
@@ -316,28 +400,80 @@ nv_init_coherent_link_info

/*
* For the virtualization use case on SHH, the coherent GPU memory
* PA is exposed as BAR1 to the VM and the nvidia,gpu-mem-base-pa
* is not present. Set the GPU memory PA to the BAR1 start address.
* PA is exposed as BAR2 to the VM and the "nvidia,gpu-mem-base-pa"
* is not present. Set the GPU memory PA to the BAR2 start address.
*
* In the case of passthrough, the reserved memory portion of the coherent
* GPU memory is exposed as BAR1.
*/
pa = nv->fb->cpu_address;

/*
* Hopper+ uses 64-bit BARs, so GPU BAR2 should be at BAR4/5 and
* GPU BAR1 is at BAR2/3.
*/
gpu_bar1_offset = 2;
gpu_bar2_offset = 4;

/*
* Cannot use nv->bars[] here as it is not populated correctly if BAR1 is
* not present but BAR2 is, even though the PCIe spec allows it. Not fixing
* nv->bars[] since this is not a valid scenario with the actual HW and is
* possible only with this host-emulated BAR scenario.
*/
if (!((NV_PCI_RESOURCE_VALID(nvl->pci_dev, gpu_bar2_offset)) &&
(NV_PCI_RESOURCE_FLAGS(nvl->pci_dev, gpu_bar2_offset) & PCI_BASE_ADDRESS_SPACE)
== PCI_BASE_ADDRESS_SPACE_MEMORY))
{
// BAR2 contains the cacheable part of the coherent FB region and must be present.
goto failed;
}
nvl->coherent_link_info.gpu_mem_pa =
NV_PCI_RESOURCE_START(nvl->pci_dev, gpu_bar2_offset);

if ((NV_PCI_RESOURCE_VALID(nvl->pci_dev, gpu_bar1_offset)) &&
(NV_PCI_RESOURCE_FLAGS(nvl->pci_dev, gpu_bar1_offset) & PCI_BASE_ADDRESS_SPACE)
== PCI_BASE_ADDRESS_SPACE_MEMORY)
{
// Present only in the passthrough case
nvl->coherent_link_info.rsvd_mem_pa = NV_PCI_RESOURCE_START(nvl->pci_dev, gpu_bar1_offset);
}

//
// Unset nv->bars[] as the BARs in the virtualization case are used
// only to convey the coherent GPU memory information and don't
// contain the traditional GPU BAR1/BAR2. This is to ensure the
// coherent FB addresses don't inadvertently pass the IS_FB_OFFSET
// or IS_IMEM_OFFSET checks.
//
memset(&nv->bars[1], 0, sizeof(nv->bars[1]));
memset(&nv->bars[2], 0, sizeof(nv->bars[2]));
}


NV_DEV_PRINTF(NV_DBG_INFO, nv, "DSD properties: \n");
NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tGPU memory PA: 0x%lx \n", pa);
NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tGPU memory PXM start: %u \n", pxm_start);
NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tGPU memory PXM count: %u \n", pxm_count);
NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tGPU memory PA: 0x%lx \n",
nvl->coherent_link_info.gpu_mem_pa);
NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tGPU reserved memory PA: 0x%lx \n",
nvl->coherent_link_info.rsvd_mem_pa);

nvl->coherent_link_info.gpu_mem_pa = pa;

for (pxm = pxm_start; pxm < (pxm_start + pxm_count); pxm++)
if (!gi_found)
{
NvU32 node = pxm_to_node(pxm);
if (node != NUMA_NO_NODE)
for (pxm = pxm_start; pxm < (pxm_start + pxm_count); pxm++)
{
set_bit(node, nvl->coherent_link_info.free_node_bitmap);
node = pxm_to_node(pxm);
if (node != NUMA_NO_NODE)
{
set_bit(node, nvl->coherent_link_info.free_node_bitmap);
}
}
}

for (node = 0; (node = find_next_bit(nvl->coherent_link_info.free_node_bitmap,
MAX_NUMNODES, node)) != MAX_NUMNODES; node++)
{
NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tNVRM: GPU memory NUMA node: %u\n", node);
}

if (NVreg_EnableUserNUMAManagement && !os_is_vgx_hyper())
{
NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE);
@@ -696,6 +832,14 @@ next_bar:
goto err_zero_dev;
}

nv->cpu_numa_node_id = dev_to_node(nvl->dev);

if (nv_linux_init_open_q(nvl) != 0)
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "nv_linux_init_open_q() failed!\n");
goto err_zero_dev;
}

nv_printf(NV_DBG_INFO,
"NVRM: PCI:%04x:%02x:%02x.%x (%04x:%04x): BAR0 @ 0x%llx (%lluMB)\n",
nv->pci_info.domain, nv->pci_info.bus, nv->pci_info.slot,
@@ -720,7 +864,7 @@ next_bar:
if (nv_linux_add_device_locked(nvl) != 0)
{
UNLOCK_NV_LINUX_DEVICES();
goto err_zero_dev;
goto err_add_device;
}

UNLOCK_NV_LINUX_DEVICES();
@@ -772,6 +916,8 @@ err_vgpu_kvm:
LOCK_NV_LINUX_DEVICES();
nv_linux_remove_device_locked(nvl);
UNLOCK_NV_LINUX_DEVICES();
err_add_device:
nv_linux_stop_open_q(nvl);
err_zero_dev:
rm_free_private_state(sp, nv);
err_not_supported:

@@ -820,14 +966,22 @@ nv_pci_remove(struct pci_dev *pci_dev)
return;
}

LOCK_NV_LINUX_DEVICES();
nvl = pci_get_drvdata(pci_dev);
if (!nvl || (nvl->pci_dev != pci_dev))
{
goto done;
nv_kmem_cache_free_stack(sp);
return;
}

nv = NV_STATE_PTR(nvl);

/*
* Flush and stop open_q before proceeding with removal to ensure nvl
* outlives all enqueued work items.
*/
nv_linux_stop_open_q(nvl);

LOCK_NV_LINUX_DEVICES();
down(&nvl->ldata_lock);

/*
@@ -1067,6 +1221,21 @@ NvU8 nv_find_pci_capability(struct pci_dev *pci_dev, NvU8 capability)
return 0;
}

static void check_for_bound_driver(struct pci_dev *pci_dev)
{
if (pci_dev->dev.driver)
{
const char *driver_name = pci_dev->dev.driver->name;

nv_printf(NV_DBG_WARNINGS, "NVRM: GPU %04x:%02x:%02x.%x is already "
"bound to %s.\n",
NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn),
driver_name ? driver_name : "another driver"
);
}
}

/* make sure the pci_driver called probe for all of our devices.
* we've seen cases where rivafb claims the device first and our driver
* doesn't get called.
@@ -1094,6 +1263,7 @@ nv_pci_count_devices(void)
pci_dev->subsystem_device,
NV_TRUE /* print_legacy_warning */))
{
check_for_bound_driver(pci_dev);
count++;
}
pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pci_dev);
@@ -1111,6 +1281,7 @@ nv_pci_count_devices(void)
pci_dev->subsystem_device,
NV_TRUE /* print_legacy_warning */))
{
check_for_bound_driver(pci_dev);
count++;
}
pci_dev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pci_dev);

@@ -822,6 +822,23 @@
#define __NV_RM_NVLINK_BW RmNvlinkBandwidth
#define NV_RM_NVLINK_BW NV_REG_STRING(__NV_RM_NVLINK_BW)

/*
* Option: NVreg_EnableNonblockingOpen
*
* Description:
*
* When this option is enabled, the NVIDIA driver will try to perform any
* required device initialization in the background when /dev/nvidiaN devices
* are opened with the flag O_NONBLOCK.
*
* Possible Values:
* 0 = O_NONBLOCK flag when opening devices is ignored
* 1 = O_NONBLOCK flag when opening devices results in background device
* initialization (default)
*/
#define __NV_ENABLE_NONBLOCKING_OPEN EnableNonblockingOpen
#define NV_ENABLE_NONBLOCKING_OPEN NV_REG_STRING(__NV_ENABLE_NONBLOCKING_OPEN)

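From user space this option looks roughly like the hedged sketch below: open() with O_NONBLOCK can return before initialization finishes, and the client then learns the result via NV_ESC_WAIT_OPEN_COMPLETE (its encoding is driver-internal, so it is only referenced in a comment here):

#include <fcntl.h>
#include <unistd.h>

int open_gpu_background(const char *node)
{
    /* e.g. node = "/dev/nvidia0"; init may continue on a kthread */
    int fd = open(node, O_RDWR | O_NONBLOCK);
    if (fd < 0)
        return -1;
    /* A real client would now issue NV_ESC_WAIT_OPEN_COMPLETE (or simply
     * its first blocking ioctl) to learn the deferred open's rc and
     * adapter status. */
    return fd;
}
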
#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)

/*
@@ -860,6 +877,7 @@ NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_RESIZABLE_BAR, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_NONBLOCKING_OPEN, 1);

NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL);

@@ -516,7 +516,6 @@ NV_STATUS nv_alloc_system_pages(

// Order should be zero except for EGM allocations.
unsigned int alloc_page_size = PAGE_SIZE << at->order;
unsigned int alloc_page_shift = BIT_IDX_32(alloc_page_size);
unsigned int alloc_num_pages = NV_CEIL(at->num_pages * PAGE_SIZE, alloc_page_size);

unsigned int sub_page_idx;
@@ -644,8 +643,6 @@ void nv_free_system_pages(

// Order should be zero except for EGM allocations.
unsigned int alloc_page_size = PAGE_SIZE << at->order;
unsigned int alloc_page_shift = BIT_IDX_32(alloc_page_size);
unsigned int alloc_num_pages = NV_CEIL(at->num_pages * PAGE_SIZE, alloc_page_size);
unsigned int os_pages_in_page = alloc_page_size / PAGE_SIZE;

nv_printf(NV_DBG_MEMINFO,

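A worked example of the order arithmetic shared by both allocators, using illustrative values (4 KiB OS pages, order 9, as might be seen for a 2 MiB EGM allocation unit):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint64_t page_size = 4096, order = 9;
    uint64_t alloc_page_size = page_size << order;           /* 2 MiB */
    uint64_t os_pages_in_page = alloc_page_size / page_size; /* 512 */
    printf("%llu bytes per unit, %llu OS pages\n",
           (unsigned long long)alloc_page_size,
           (unsigned long long)os_pages_in_page);
    return 0;
}
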
@@ -37,7 +37,6 @@
#include "nv-msi.h"
#include "nv-pci-table.h"
#include "nv-chardev-numbers.h"
#include "nv-register-module.h" // TODO remove once NVKMS migrates

#if defined(NV_UVM_ENABLE)
#include "nv_uvm_interface.h"
@@ -156,10 +155,6 @@ static struct cdev nv_linux_devices_cdev;
// cdev covering the control device
static struct cdev nv_linux_control_device_cdev;

// lock for nvidia_register_module "extmod" emulation
// TODO remove once NVKMS migrates
static struct semaphore nv_extmod_lock;

extern NvU32 nv_dma_remap_peer_mmio;

nv_kthread_q_t nv_kthread_q;
@@ -182,8 +177,6 @@ static int nv_tce_bypass_mode = NV_TCE_BYPASS_MODE_DEFAULT;

struct semaphore nv_linux_devices_lock;

static NvTristate nv_chipset_is_io_coherent = NV_TRISTATE_INDETERMINATE;

// True if all the successfully probed devices support ATS
// Assigned at device probe (module init) time
NvBool nv_ats_supported = NVCPU_IS_PPC64LE
@@ -667,8 +660,6 @@ nv_module_init(nv_stack_t **sp)
{
int rc;

NV_INIT_MUTEX(&nv_extmod_lock); // TODO remove once NVKMS migrates

rc = nv_module_resources_init(sp);
if (rc < 0)
{
@@ -861,9 +852,8 @@ static int __init nvidia_init_module(void)
"NVRM: The NVIDIA probe routine was not called for %d device(s).\n",
count - num_probed_nv_devices);
nv_printf(NV_DBG_ERRORS,
"NVRM: This can occur when a driver such as: \n"
"NVRM: nouveau, rivafb, nvidiafb or rivatv "
"\nNVRM: was loaded and obtained ownership of the NVIDIA device(s).\n");
"NVRM: This can occur when another driver was loaded and \n"
"NVRM: obtained ownership of the NVIDIA device(s).\n");
nv_printf(NV_DBG_ERRORS,
"NVRM: Try unloading the conflicting kernel module (and/or\n"
"NVRM: reconfigure your kernel without the conflicting\n"
@@ -1019,12 +1009,32 @@ static void nv_free_file_private(nv_linux_file_private_t *nvlfp)
NV_KFREE(nvlfp, sizeof(nv_linux_file_private_t));
}


static int nv_is_control_device(
struct inode *inode
)
/*
* Find the nv device with the given minor device number in the minor number
* table. Caller should hold nv_linux_devices_lock using
* LOCK_NV_LINUX_DEVICES. This function does not automatically take
* nvl->ldata_lock, so the caller must do that if required.
*/
static nv_linux_state_t *find_minor_locked(NvU32 minor)
{
return (minor((inode)->i_rdev) == NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE);
nv_linux_state_t *nvl;

if (minor > NV_MINOR_DEVICE_NUMBER_REGULAR_MAX)
return NULL;

nvl = nv_linux_minor_num_table[minor];
if (nvl == NULL)
{
// there isn't actually a GPU present for nv_linux_minor_num_table[minor]
}
else if (nvl->minor_num != minor)
{
// nv_linux_minor_num_table out of sync -- this shouldn't happen
WARN_ON(1);
nvl = NULL;
}

return nvl;
}

/*
@@ -1040,17 +1050,11 @@ static nv_linux_state_t *find_minor(NvU32 minor)

LOCK_NV_LINUX_DEVICES();

nvl = nv_linux_minor_num_table[minor];
if (nvl->minor_num == minor)
nvl = find_minor_locked(minor);
if (nvl != NULL)
{
down(&nvl->ldata_lock);
}
else
{
// nv_linux_minor_num_table out of sync -- this shouldn't happen
WARN_ON(1);
nvl = NULL;
}

UNLOCK_NV_LINUX_DEVICES();
return nvl;
@@ -1320,12 +1324,11 @@ static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)
rm_read_registry_dword(sp, nv, NV_REG_ENABLE_MSI, &msi_config);
if (msi_config == 1)
{
if (pci_find_capability(nvl->pci_dev, PCI_CAP_ID_MSIX))
if (nvl->pci_dev->msix_cap && rm_is_msix_allowed(sp, nv))
{
nv_init_msix(nv);
}
if (pci_find_capability(nvl->pci_dev, PCI_CAP_ID_MSI) &&
!(nv->flags & NV_FLAG_USES_MSIX))
if (nvl->pci_dev->msi_cap && !(nv->flags & NV_FLAG_USES_MSIX))
{
nv_init_msi(nv);
}
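
The switch from pci_find_capability() to the cached msix_cap/msi_cap fields leans on the PCI core recording capability offsets at enumeration time, so no config-space walk is repeated per check. A minimal sketch of the equivalent test (dev_has_msix is illustrative, not a kernel API):

#include <linux/pci.h>

static bool dev_has_msix(struct pci_dev *pdev)
{
    /* equivalent to pci_find_capability(pdev, PCI_CAP_ID_MSIX) != 0,
     * but reads the offset cached by the PCI core at probe time */
    return pdev->msix_cap != 0;
}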
@@ -1528,6 +1531,17 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
int rc;
NV_STATUS status;

if ((nv->flags & NV_FLAG_EXCLUDE) != 0)
{
char *uuid = rm_get_gpu_uuid(sp, nv);
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"open() not permitted for excluded %s\n",
(uuid != NULL) ? uuid : "GPU");
if (uuid != NULL)
os_free_mem(uuid);
return -EPERM;
}

if (os_is_vgx_hyper())
{
/* fail open if GPU is being unbound */
@@ -1575,6 +1589,8 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
return -EBUSY;
}

nv_assert_not_in_gpu_exclusion_list(sp, nv);

NV_ATOMIC_INC(nvl->usage_count);
return 0;
}
@@ -1601,6 +1617,108 @@ static void nv_init_mapping_revocation(nv_linux_state_t *nvl,
up(&nvl->mmap_lock);
}

/*
* Like nv_open_device but stores rc and adapter status in the given nvlfp.
* Assumes nvl->ldata_lock is held.
*/
static int nv_open_device_for_nvlfp(
nv_state_t *nv,
nvidia_stack_t *sp,
nv_linux_file_private_t *nvlfp
)
{
nvlfp->open_rc = nv_open_device(nv, sp);

if (nvlfp->open_rc == 0)
{
nvlfp->adapter_status = NV_OK;
}
else
{
nvlfp->adapter_status = rm_get_adapter_status_external(sp, nv);
}

return nvlfp->open_rc;
}

static void nvidia_open_deferred(void *nvlfp_raw)
{
nv_linux_file_private_t *nvlfp = (nv_linux_file_private_t *) nvlfp_raw;
nv_linux_state_t *nvl = nvlfp->deferred_open_nvl;
int rc;

/*
* Deferred opens and device removal are synchronized via
* nvl->is_accepting_opens and nvl->open_q flushes so that nvl is
* guaranteed to outlive any pending open operation.
*
* So, it is safe to take nvl->ldata_lock here without holding
* any refcount or larger lock.
*
* Deferred opens and system suspend are synchronized by an explicit
* nvl->open_q flush before suspending.
*
* So, it is safe to proceed without nv_system_pm_lock here (in fact, it
* must not be taken to ensure nvl->open_q can make forward progress).
*/
down(&nvl->ldata_lock);
rc = nv_open_device_for_nvlfp(NV_STATE_PTR(nvl), nvlfp->sp, nvlfp);
up(&nvl->ldata_lock);

/* Set nvptr only upon success (where nvl->usage_count is incremented) */
if (rc == 0)
nvlfp->nvptr = nvl;

complete_all(&nvlfp->open_complete);
}

/*
* Tries to prepare (by taking nvl->ldata_lock) for an open in the foreground
* for the given file and device.
*
* This succeeds if:
* - O_NONBLOCK is not passed (or non-blocking opens are disabled), or
* - O_NONBLOCK is passed, but we are able to determine (without blocking)
*   that the device is already initialized
*
* Returns 0 with nvl->ldata_lock taken if open can occur in the foreground.
* Otherwise, returns non-zero (without nvl->ldata_lock taken).
*/
static int nv_try_lock_foreground_open(
struct file *file,
nv_linux_state_t *nvl
)
{
nv_state_t *nv = NV_STATE_PTR(nvl);

if (NVreg_EnableNonblockingOpen && (file->f_flags & O_NONBLOCK))
{
if (down_trylock(&nvl->ldata_lock) == 0)
{
if (nv->flags & NV_FLAG_OPEN)
{
/* device already initialized */
return 0;
}
else
{
/* device not initialized yet */
up(&nvl->ldata_lock);
return -EWOULDBLOCK;
}
}
else
{
/* unable to check nv->flags safely without blocking */
return -EWOULDBLOCK;
}
}

/* O_NONBLOCK not passed or non-blocking opens are disabled */
down(&nvl->ldata_lock);
return 0;
}

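nv_try_lock_foreground_open() is an instance of a general trylock-or-defer shape: take the lock opportunistically on the fast path and hand contended or would-block cases to a worker. A hedged generic sketch in plain pthreads (none of these names are driver types):

#include <pthread.h>
#include <stdbool.h>

/* Returns true if the caller may proceed inline with the mutex held;
 * false means the work should be queued to a background worker. */
static bool try_foreground(pthread_mutex_t *lock, bool nonblocking,
                           const bool *initialized)
{
    if (nonblocking) {
        if (pthread_mutex_trylock(lock) != 0)
            return false;                /* contended: defer to worker */
        if (!*initialized) {             /* checked under the lock */
            pthread_mutex_unlock(lock);
            return false;                /* init would block: defer */
        }
        return true;                     /* fast path, lock held */
    }
    pthread_mutex_lock(lock);            /* blocking open: foreground */
    return true;
}
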
/*
** nvidia_open
**
@@ -1651,37 +1769,81 @@ nvidia_open(
if (rc < 0)
goto failed;

/* Takes nvl->ldata_lock */
nvl = find_minor(NV_DEVICE_MINOR_NUMBER(inode));
if (!nvl)
/* nvptr will get set to actual nvl upon successful open */
nvlfp->nvptr = NULL;

init_completion(&nvlfp->open_complete);

LOCK_NV_LINUX_DEVICES();

nvl = find_minor_locked(NV_DEVICE_MINOR_NUMBER(inode));
if (nvl == NULL)
{
rc = -ENODEV;
UNLOCK_NV_LINUX_DEVICES();
up_read(&nv_system_pm_lock);
goto failed;
}

nvlfp->nvptr = nvl;
nv = NV_STATE_PTR(nvl);

if ((nv->flags & NV_FLAG_EXCLUDE) != 0)
if (nv_try_lock_foreground_open(file, nvl) == 0)
{
char *uuid = rm_get_gpu_uuid(sp, nv);
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"open() not permitted for excluded %s\n",
(uuid != NULL) ? uuid : "GPU");
if (uuid != NULL)
os_free_mem(uuid);
rc = -EPERM;
goto failed1;
/* Proceed in foreground */
/* nvl->ldata_lock is already taken at this point */

UNLOCK_NV_LINUX_DEVICES();

rc = nv_open_device_for_nvlfp(nv, nvlfp->sp, nvlfp);

up(&nvl->ldata_lock);

/* Set nvptr only upon success (where nvl->usage_count is incremented) */
if (rc == 0)
nvlfp->nvptr = nvl;

complete_all(&nvlfp->open_complete);
}
else
{
/* Defer to background kthread */
int item_scheduled = 0;

rc = nv_open_device(nv, sp);
/* Fall-through on error */
/*
* Take nvl->open_q_lock in order to check nvl->is_accepting_opens and
* schedule work items on nvl->open_q.
*
* Continue holding nv_linux_devices_lock (LOCK_NV_LINUX_DEVICES)
* until the work item gets onto nvl->open_q in order to ensure the
* lifetime of nvl.
*/
down(&nvl->open_q_lock);

nv_assert_not_in_gpu_exclusion_list(sp, nv);
if (!nvl->is_accepting_opens)
{
/* Background kthread is not accepting opens, bail! */
rc = -EBUSY;
goto nonblock_end;
}

failed1:
up(&nvl->ldata_lock);
nvlfp->deferred_open_nvl = nvl;
nv_kthread_q_item_init(&nvlfp->open_q_item,
nvidia_open_deferred,
nvlfp);

item_scheduled = nv_kthread_q_schedule_q_item(
&nvl->open_q, &nvlfp->open_q_item);

if (!item_scheduled)
{
WARN_ON(!item_scheduled);
rc = -EBUSY;
}

nonblock_end:
up(&nvl->open_q_lock);
UNLOCK_NV_LINUX_DEVICES();
}

up_read(&nv_system_pm_lock);
failed:
@@ -1876,11 +2038,27 @@ nvidia_close_callback(
nv_linux_file_private_t *nvlfp
)
{
nv_linux_state_t *nvl = nvlfp->nvptr;
nv_state_t *nv = NV_STATE_PTR(nvl);
nvidia_stack_t *sp = nvlfp->sp;
nv_linux_state_t *nvl;
nv_state_t *nv;
nvidia_stack_t *sp;
NvBool bRemove = NV_FALSE;

nvl = nvlfp->nvptr;
if (nvl == NULL)
{
/*
* If nvlfp has no associated nvl device (meaning the open operation
* failed), then there is no state outside of nvlfp to clean up.
*/

nv_free_file_private(nvlfp);
nv_kmem_cache_free_stack(nvlfp->sp);
return;
}

nv = NV_STATE_PTR(nvl);
sp = nvlfp->sp;

rm_cleanup_file_private(sp, nv, &nvlfp->nvfp);

down(&nvl->mmap_lock);
@@ -1934,6 +2112,8 @@ static void nvidia_close_deferred(void *data)
{
nv_linux_file_private_t *nvlfp = data;

nv_wait_open_complete(nvlfp);

down_read(&nv_system_pm_lock);

nvidia_close_callback(nvlfp);
@@ -1949,10 +2129,10 @@ nvidia_close(
{
int rc;
nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
nv_linux_state_t *nvl = nvlfp->nvptr;
nv_state_t *nv = NV_STATE_PTR(nvl);

NV_DEV_PRINTF(NV_DBG_INFO, nv, "nvidia_close on GPU with minor number %d\n", NV_DEVICE_MINOR_NUMBER(inode));
nv_printf(NV_DBG_INFO,
"NVRM: nvidia_close on GPU with minor number %d\n",
NV_DEVICE_MINOR_NUMBER(inode));

if (nv_is_control_device(inode))
{
@@ -1961,7 +2141,12 @@ nvidia_close(

NV_SET_FILE_PRIVATE(file, NULL);

rc = nv_down_read_interruptible(&nv_system_pm_lock);
rc = nv_wait_open_complete_interruptible(nvlfp);
if (rc == 0)
{
rc = nv_down_read_interruptible(&nv_system_pm_lock);
}

if (rc == 0)
{
nvidia_close_callback(nvlfp);
@@ -1989,10 +2174,26 @@ nvidia_poll(
unsigned int mask = 0;
nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
unsigned long eflags;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file);
nv_state_t *nv = NV_STATE_PTR(nvl);
nv_linux_state_t *nvl;
nv_state_t *nv;
NV_STATUS status;

if (!nv_is_control_device(NV_FILE_INODE(file)))
{
if (!nv_is_open_complete(nvlfp))
{
return POLLERR;
}
}

nvl = nvlfp->nvptr;
if (nvl == NULL)
{
return POLLERR;
}

nv = NV_STATE_PTR(nvl);

status = nv_check_gpu_state(nv);
if (status == NV_ERR_GPU_IS_LOST)
{
@@ -2097,9 +2298,9 @@ nvidia_ioctl(
{
NV_STATUS rmStatus;
int status = 0;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file);
nv_state_t *nv = NV_STATE_PTR(nvl);
nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
nv_linux_state_t *nvl;
nv_state_t *nv;
nvidia_stack_t *sp = NULL;
nv_ioctl_xfer_t ioc_xfer;
void *arg_ptr = (void *) i_arg;
@@ -2110,23 +2311,11 @@ nvidia_ioctl(
nv_printf(NV_DBG_INFO, "NVRM: ioctl(0x%x, 0x%x, 0x%x)\n",
_IOC_NR(cmd), (unsigned int) i_arg, _IOC_SIZE(cmd));

status = nv_down_read_interruptible(&nv_system_pm_lock);
if (status < 0)
return status;

status = nv_kmem_cache_alloc_stack(&sp);
if (status != 0)
if (!nv_is_control_device(inode))
{
nv_printf(NV_DBG_ERRORS, "NVRM: Unable to allocate altstack for ioctl\n");
goto done;
}

rmStatus = nv_check_gpu_state(nv);
if (rmStatus == NV_ERR_GPU_IS_LOST)
{
nv_printf(NV_DBG_INFO, "NVRM: GPU is lost, skipping nvidia_ioctl\n");
status = -EINVAL;
goto done;
status = nv_wait_open_complete_interruptible(nvlfp);
if (status != 0)
goto done_early;
}

arg_size = _IOC_SIZE(cmd);
@@ -2139,7 +2328,7 @@ nvidia_ioctl(
nv_printf(NV_DBG_ERRORS,
"NVRM: invalid ioctl XFER structure size!\n");
status = -EINVAL;
goto done;
goto done_early;
}

if (NV_COPY_FROM_USER(&ioc_xfer, arg_ptr, sizeof(ioc_xfer)))
@@ -2147,7 +2336,7 @@ nvidia_ioctl(
nv_printf(NV_DBG_ERRORS,
"NVRM: failed to copy in ioctl XFER data!\n");
status = -EFAULT;
goto done;
goto done_early;
}

arg_cmd = ioc_xfer.cmd;
@@ -2158,7 +2347,7 @@ nvidia_ioctl(
{
nv_printf(NV_DBG_ERRORS, "NVRM: invalid ioctl XFER size!\n");
status = -EINVAL;
goto done;
goto done_early;
}
}

@@ -2167,13 +2356,55 @@ nvidia_ioctl(
{
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate ioctl memory\n");
status = -ENOMEM;
goto done;
goto done_early;
}

if (NV_COPY_FROM_USER(arg_copy, arg_ptr, arg_size))
{
nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy in ioctl data!\n");
status = -EFAULT;
goto done_early;
}

/*
* Handle NV_ESC_WAIT_OPEN_COMPLETE early as it is allowed to work
* with or without nvl.
*/
if (arg_cmd == NV_ESC_WAIT_OPEN_COMPLETE)
{
nv_ioctl_wait_open_complete_t *params = arg_copy;
params->rc = nvlfp->open_rc;
params->adapterStatus = nvlfp->adapter_status;
goto done_early;
}

nvl = nvlfp->nvptr;
if (nvl == NULL)
{
status = -EIO;
goto done_early;
}

nv = NV_STATE_PTR(nvl);

status = nv_down_read_interruptible(&nv_system_pm_lock);
if (status < 0)
{
goto done_early;
}

status = nv_kmem_cache_alloc_stack(&sp);
if (status != 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Unable to allocate altstack for ioctl\n");
goto done_pm_unlock;
}

rmStatus = nv_check_gpu_state(nv);
if (rmStatus == NV_ERR_GPU_IS_LOST)
{
nv_printf(NV_DBG_INFO, "NVRM: GPU is lost, skipping nvidia_ioctl\n");
status = -EINVAL;
goto done;
}

@@ -2425,8 +2656,10 @@ unlock:
done:
nv_kmem_cache_free_stack(sp);

done_pm_unlock:
up_read(&nv_system_pm_lock);

done_early:
if (arg_copy != NULL)
{
if (status != -EFAULT)
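
The done/done_pm_unlock/done_early labels above follow the usual staged-unwind idiom: each label releases exactly what was acquired after the previous one. A generic runnable sketch of the shape, with stand-in stubs (take_lock, alloc_buf, and friends are illustrative only):

#include <stdio.h>

static int take_lock(void)  { puts("lock");   return 0; }
static void drop_lock(void) { puts("unlock"); }
static int alloc_buf(void)  { puts("alloc");  return 0; }
static void free_buf(void)  { puts("free"); }

static int do_op(void)
{
    int status;

    status = take_lock();
    if (status != 0)
        goto done_early;      /* nothing acquired yet */

    status = alloc_buf();
    if (status != 0)
        goto done_unlock;     /* lock held, but no buffer */

    puts("work");
    free_buf();

done_unlock:
    drop_lock();
done_early:
    return status;
}

int main(void) { return do_op(); }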
@@ -3285,6 +3518,16 @@ NV_STATUS NV_API_CALL nv_get_phys_pages(
return NV_OK;
}

void nv_get_disp_smmu_stream_ids
(
nv_state_t *nv,
NvU32 *dispIsoStreamId,
NvU32 *dispNisoStreamId)
{
*dispIsoStreamId = nv->iommus.dispIsoStreamId;
*dispNisoStreamId = nv->iommus.dispNisoStreamId;
}

void* NV_API_CALL nv_alloc_kernel_mapping(
nv_state_t *nv,
void *pAllocPrivate,
@@ -3486,6 +3729,11 @@ NV_STATUS NV_API_CALL nv_alloc_pages(
}
else
{
if (page_size == 0)
{
status = NV_ERR_INVALID_ARGUMENT;
goto failed;
}
at->order = get_order(page_size);
status = nv_alloc_system_pages(nv, at);
}
@@ -3578,6 +3826,7 @@ NvBool nv_lock_init_locks

NV_INIT_MUTEX(&nvl->ldata_lock);
NV_INIT_MUTEX(&nvl->mmap_lock);
NV_INIT_MUTEX(&nvl->open_q_lock);

NV_ATOMIC_SET(nvl->usage_count, 0);

@@ -3898,8 +4147,11 @@ nvos_count_devices(void)
return count;
}

#if NVCPU_IS_AARCH64
NvBool nvos_is_chipset_io_coherent(void)
{
static NvTristate nv_chipset_is_io_coherent = NV_TRISTATE_INDETERMINATE;

if (nv_chipset_is_io_coherent == NV_TRISTATE_INDETERMINATE)
{
nvidia_stack_t *sp = NULL;
@@ -3918,6 +4170,7 @@ NvBool nvos_is_chipset_io_coherent(void)

return nv_chipset_is_io_coherent;
}
#endif // NVCPU_IS_AARCH64

#if defined(CONFIG_PM)
static NV_STATUS
@@ -3948,6 +4201,17 @@ nv_power_management(
/* fall through */
case NV_PM_ACTION_HIBERNATE:
{
/*
* Flush nvl->open_q before suspend/hibernate to ensure deferred
* opens do not get attempted during the PM transition.
*
* Note: user space is either frozen by the kernel or locked out
* by nv_system_pm_lock, so no further deferred opens can be
* enqueued before resume (meaning we do not need to unset
* nvl->is_accepting_opens).
*/
nv_kthread_q_flush(&nvl->open_q);

status = rm_power_management(sp, nv, pm_action);

nv_kthread_q_stop(&nvl->bottom_half_q);
@@ -4656,7 +4920,7 @@ NvU64 NV_API_CALL nv_get_dma_start_address(
}
else if ((dma_addr & saved_dma_mask) != 0)
{
NvU64 memory_size = os_get_num_phys_pages() * PAGE_SIZE;
NvU64 memory_size = NV_NUM_PHYSPAGES * PAGE_SIZE;
if ((dma_addr & ~saved_dma_mask) !=
((dma_addr + memory_size) & ~saved_dma_mask))
{
@@ -4964,6 +5228,7 @@ NV_STATUS NV_API_CALL nv_get_device_memory_config(
nv_state_t *nv,
NvU64 *compr_addr_sys_phys,
NvU64 *addr_guest_phys,
NvU64 *rsvd_phys,
NvU32 *addr_width,
NvS32 *node_id
)
@@ -5024,6 +5289,10 @@ NV_STATUS NV_API_CALL nv_get_device_memory_config(
{
*addr_guest_phys = nvl->coherent_link_info.gpu_mem_pa;
}
if (rsvd_phys)
{
*rsvd_phys = nvl->coherent_link_info.rsvd_mem_pa;
}
if (addr_width)
{
// TH500 PA width - NV_PFB_PRI_MMU_ATS_ADDR_RANGE_GRANULARITY
@@ -5240,6 +5509,35 @@ void nv_linux_remove_device_locked(nv_linux_state_t *nvl)
nv_linux_minor_num_table[nvl->minor_num] = NULL;
}

int nv_linux_init_open_q(nv_linux_state_t *nvl)
{
int rc;
rc = nv_kthread_q_init(&nvl->open_q, "nv_open_q");
if (rc != 0)
return rc;

down(&nvl->open_q_lock);
nvl->is_accepting_opens = NV_TRUE;
up(&nvl->open_q_lock);
return 0;
}

void nv_linux_stop_open_q(nv_linux_state_t *nvl)
{
NvBool should_stop = NV_FALSE;

down(&nvl->open_q_lock);
if (nvl->is_accepting_opens)
{
should_stop = NV_TRUE;
nvl->is_accepting_opens = NV_FALSE;
}
up(&nvl->open_q_lock);

if (should_stop)
nv_kthread_q_stop(&nvl->open_q);
}

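nv_linux_stop_open_q() decides under open_q_lock but performs the potentially sleeping stop outside it, which also makes repeated calls harmless. A hedged generic sketch of that decide-under-lock, act-outside-lock shape (plain pthreads, illustrative names):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static bool accepting = true;

static void stop_queue_blocking(void) { puts("flush + stop"); /* may sleep */ }

static void stop_once(void)
{
    bool should_stop = false;

    pthread_mutex_lock(&q_lock);
    if (accepting) {               /* decide under the lock... */
        accepting = false;
        should_stop = true;
    }
    pthread_mutex_unlock(&q_lock);

    if (should_stop)               /* ...act (possibly sleeping) outside it */
        stop_queue_blocking();
}

int main(void) { stop_once(); stop_once(); return 0; }
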
void NV_API_CALL nv_control_soc_irqs(nv_state_t *nv, NvBool bEnable)
{
int count;
@@ -5452,15 +5750,10 @@ void NV_API_CALL nv_audio_dynamic_power(
static int nv_match_dev_state(const void *data, struct file *filp, unsigned fd)
{
nv_linux_state_t *nvl = NULL;
dev_t rdev = 0;

if (filp == NULL ||
filp->private_data == NULL ||
NV_FILE_INODE(filp) == NULL)
return 0;

rdev = (NV_FILE_INODE(filp))->i_rdev;
if (MAJOR(rdev) != NV_MAJOR_DEVICE_NUMBER)
filp->f_op != &nvidia_fops ||
filp->private_data == NULL)
return 0;

nvl = NV_GET_NVL_FROM_FILEP(filp);
@@ -5758,89 +6051,5 @@ failed:
}


/*
* nvidia_register_module "extmod" emulation
*
* TODO remove once NVKMS migrates
*
* Emulate nv-frontend's behavior of enabling the use of minor number 254
* given module->instance == 1 via the file operations provided in the
* nvidia_module_t structure.
*
* This path is only used by NVKMS and will be removed once NVKMS migrates
* to export its own file_operations structure directly.
*/
static struct file_operations nv_extmod_fops;
static struct cdev nv_extmod_cdev;
static nvidia_module_t *nv_extmod;

static long nv_extmod_unlocked_ioctl(
struct file *file,
unsigned int cmd,
unsigned long i_arg
)
{
return nv_extmod->ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}

int nvidia_register_module(nvidia_module_t *module)
{
int rc;

down(&nv_extmod_lock);
if ((nv_extmod != NULL) || (module == NULL) || (module->instance != 1))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: NVIDIA module (emulated) registration failed.\n");
up(&nv_extmod_lock);
return -EINVAL;
}

memset(&nv_extmod_fops, 0, sizeof(nv_extmod_fops));
nv_extmod_fops.owner = module->owner;
nv_extmod_fops.poll = module->poll;
nv_extmod_fops.unlocked_ioctl = nv_extmod_unlocked_ioctl;
#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
nv_extmod_fops.compat_ioctl = nv_extmod_unlocked_ioctl;
#endif
nv_extmod_fops.mmap = module->mmap;
nv_extmod_fops.open = module->open;
nv_extmod_fops.release = module->close;

rc = nv_register_chrdev(NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE, 1,
&nv_extmod_cdev, "nvidia-modeset", &nv_extmod_fops);
if (rc < 0)
{
up(&nv_extmod_lock);
return rc;
}

nv_extmod = module;
up(&nv_extmod_lock);
return rc;
}
EXPORT_SYMBOL(nvidia_register_module);

int nvidia_unregister_module(nvidia_module_t *module)
{
down(&nv_extmod_lock);
if (nv_extmod == NULL)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: NVIDIA module (emulated) non-existent de-registration.\n");
up(&nv_extmod_lock);
return -EINVAL;
}

nv_unregister_chrdev(NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE, 1,
&nv_extmod_cdev);

nv_extmod = NULL;
up(&nv_extmod_lock);
return 0;
}
EXPORT_SYMBOL(nvidia_unregister_module);


module_init(nvidia_init_module);
module_exit(nvidia_exit_module);

@@ -60,6 +60,7 @@ NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device);
NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device,
NvU64 vaBase,
NvU64 vaSize,
NvBool enableAts,
gpuAddressSpaceHandle *vaSpace,
UvmGpuAddressSpaceInfo *vaSpaceInfo);

@@ -93,11 +94,6 @@ NV_STATUS nvGpuOpsPmaPinPages(void *pPma,
NvU64 pageSize,
NvU32 flags);

NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU64 pageSize);

NV_STATUS nvGpuOpsTsgAllocate(gpuAddressSpaceHandle vaSpace,
const gpuTsgAllocParams *params,
gpuTsgHandle *tsgHandle);
@@ -285,11 +281,14 @@ NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,

NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(struct gpuDevice *device);

NV_STATUS nvGpuOpsTogglePrefetchFaults(gpuFaultInfo *pFaultInfo, NvBool bEnable);

// Interface used for CCSL

NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx,
gpuChannelHandle channel);
NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx);
NV_STATUS nvGpuOpsCcslContextUpdate(struct ccslContext_t *ctx);
NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx,
NvU8 direction);
NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx,
@@ -322,5 +321,7 @@ NV_STATUS nvGpuOpsIncrementIv(struct ccslContext_t *ctx,
NvU8 direction,
NvU64 increment,
NvU8 *iv);
NV_STATUS nvGpuOpsLogDeviceEncryption(struct ccslContext_t *ctx,
NvU32 bufferSize);

#endif /* _NV_GPU_OPS_H_*/

@@ -209,7 +209,7 @@ NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session,
memset(platformInfo, 0, sizeof(*platformInfo));
platformInfo->atsSupported = nv_ats_supported;

platformInfo->sevEnabled = os_cc_enabled;
platformInfo->confComputingEnabled = os_cc_enabled;

status = rm_gpu_ops_create_session(sp, (gpuSessionHandle *)session);

@@ -295,6 +295,7 @@ EXPORT_SYMBOL(nvUvmInterfaceDupAddressSpace);
NV_STATUS nvUvmInterfaceAddressSpaceCreate(uvmGpuDeviceHandle device,
unsigned long long vaBase,
unsigned long long vaSize,
NvBool enableAts,
uvmGpuAddressSpaceHandle *vaSpace,
UvmGpuAddressSpaceInfo *vaSpaceInfo)
{
@@ -310,6 +311,7 @@ NV_STATUS nvUvmInterfaceAddressSpaceCreate(uvmGpuDeviceHandle device,
(gpuDeviceHandle)device,
vaBase,
vaSize,
enableAts,
(gpuAddressSpaceHandle *)vaSpace,
vaSpaceInfo);

@@ -493,26 +495,6 @@ NV_STATUS nvUvmInterfacePmaPinPages(void *pPma,
}
EXPORT_SYMBOL(nvUvmInterfacePmaPinPages);

NV_STATUS nvUvmInterfacePmaUnpinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU64 pageSize)
{
nvidia_stack_t *sp = NULL;
NV_STATUS status;

if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
return NV_ERR_NO_MEMORY;
}

status = rm_gpu_ops_pma_unpin_pages(sp, pPma, pPages, pageCount, pageSize);

nv_kmem_cache_free_stack(sp);
return status;
}
EXPORT_SYMBOL(nvUvmInterfacePmaUnpinPages);

void nvUvmInterfaceMemoryFree(uvmGpuAddressSpaceHandle vaSpace,
UvmGpuPointer gpuPointer)
{
@@ -1015,6 +997,18 @@ NV_STATUS nvUvmInterfaceFlushReplayableFaultBuffer(uvmGpuDeviceHandle device)
}
EXPORT_SYMBOL(nvUvmInterfaceFlushReplayableFaultBuffer);

NV_STATUS nvUvmInterfaceTogglePrefetchFaults(UvmGpuFaultInfo *pFaultInfo, NvBool bEnable)
{
nvidia_stack_t *sp = nvUvmGetSafeStack();
NV_STATUS status;

status = rm_gpu_ops_toggle_prefetch_faults(sp, pFaultInfo, bEnable);

nvUvmFreeSafeStack(sp);
return status;
}
EXPORT_SYMBOL(nvUvmInterfaceTogglePrefetchFaults);

NV_STATUS nvUvmInterfaceDestroyAccessCntrInfo(uvmGpuDeviceHandle device,
UvmGpuAccessCntrInfo *pAccessCntrInfo)
{
@@ -1516,6 +1510,17 @@ void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext)
}
EXPORT_SYMBOL(nvUvmInterfaceDeinitCslContext);

NV_STATUS nvUvmInterfaceCslUpdateContext(UvmCslContext *uvmCslContext)
{
NV_STATUS status;
nvidia_stack_t *sp = uvmCslContext->nvidia_stack;

status = rm_gpu_ops_ccsl_context_update(sp, uvmCslContext->ctx);

return status;
}
EXPORT_SYMBOL(nvUvmInterfaceCslUpdateContext);

NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
UvmCslOperation operation)
{
@@ -1614,6 +1619,18 @@ NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
}
EXPORT_SYMBOL(nvUvmInterfaceCslIncrementIv);

NV_STATUS nvUvmInterfaceCslLogExternalEncryption(UvmCslContext *uvmCslContext,
NvU32 bufferSize)
{
NV_STATUS status;
nvidia_stack_t *sp = uvmCslContext->nvidia_stack;

status = rm_gpu_ops_ccsl_log_device_encryption(sp, uvmCslContext->ctx, bufferSize);

return status;
}
EXPORT_SYMBOL(nvUvmInterfaceCslLogExternalEncryption);

#else // NV_UVM_ENABLE

NV_STATUS nv_uvm_suspend(void)

@@ -195,6 +195,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_task_ioprio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mdev_set_iommu_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += offline_and_remove_memory
NV_CONFTEST_FUNCTION_COMPILE_TESTS += crypto_tfm_ctx_aligned

NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active
@@ -215,6 +216,7 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_dram_num_channe
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dram_types
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pxm_to_node
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_screen_info
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_screen_info
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_i2c_bus_status
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_fuse_control_read
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_get_platform

@@ -30,14 +30,18 @@

#include <linux/mmzone.h>
#include <linux/numa.h>
#include <linux/cpuset.h>

#include <linux/pid.h>
#if defined(CONFIG_LOCKDEP)
#include <linux/lockdep.h>
#endif // CONFIG_LOCKDEP

extern char *NVreg_TemporaryFilePath;

#define MAX_ERROR_STRING 512
#define MAX_ERROR_STRING 528
static char nv_error_string[MAX_ERROR_STRING];
nv_spinlock_t nv_error_string_lock;
static NV_DEFINE_SPINLOCK(nv_error_string_lock);

extern nv_linux_state_t nv_ctl_device;

@@ -238,7 +242,20 @@ NV_STATUS NV_API_CALL os_release_semaphore
return NV_OK;
}

typedef struct rw_semaphore os_rwlock_t;
typedef struct
{
struct rw_semaphore sem;

#if defined(CONFIG_LOCKDEP)
/**
* A lock class key. It is registered with the Lockdep validator so that all
* instances' usages and dependencies contribute to constructing correct
* locking rules, and so that this lock is tracked by the Lockdep validator.
*/
struct lock_class_key key;
#endif // CONFIG_LOCKDEP
} os_rwlock_t;

void* NV_API_CALL os_alloc_rwlock(void)
{
@@ -247,11 +264,17 @@ void* NV_API_CALL os_alloc_rwlock(void)
NV_STATUS rmStatus = os_alloc_mem((void *)&os_rwlock, sizeof(os_rwlock_t));
if (rmStatus != NV_OK)
{
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate rw_semaphore!\n");
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate a struct os_rwlock_t!\n");
return NULL;
}

init_rwsem(os_rwlock);
init_rwsem(&os_rwlock->sem);

#if defined(CONFIG_LOCKDEP)
// Register the dynamically allocated key to Lockdep.
lockdep_register_key(&os_rwlock->key);
lockdep_set_class(&os_rwlock->sem, &os_rwlock->key);
#endif // CONFIG_LOCKDEP

return os_rwlock;
}
@@ -259,6 +282,12 @@ void* NV_API_CALL os_alloc_rwlock(void)
void NV_API_CALL os_free_rwlock(void *pRwLock)
{
os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;

#if defined(CONFIG_LOCKDEP)
// Unregister the dynamically allocated key.
lockdep_unregister_key(&os_rwlock->key);
#endif // CONFIG_LOCKDEP

os_free_mem(os_rwlock);
}

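The per-instance lock_class_key dance above generalizes to any dynamically allocated lock. A minimal sketch for a hypothetical mutex wrapper, assuming the same lockdep interfaces (my_lock and its helpers are illustrative, not driver code):

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_lock {
    struct mutex m;
#ifdef CONFIG_LOCKDEP
    struct lock_class_key key;   /* one key per instance */
#endif
};

static struct my_lock *my_lock_alloc(void)
{
    struct my_lock *l = kzalloc(sizeof(*l), GFP_KERNEL);
    if (!l)
        return NULL;
    mutex_init(&l->m);
#ifdef CONFIG_LOCKDEP
    lockdep_register_key(&l->key);       /* tell lockdep the key exists */
    lockdep_set_class(&l->m, &l->key);   /* classify this instance */
#endif
    return l;
}

static void my_lock_free(struct my_lock *l)
{
#ifdef CONFIG_LOCKDEP
    lockdep_unregister_key(&l->key);     /* must precede freeing */
#endif
    kfree(l);
}
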
@@ -270,7 +299,7 @@ NV_STATUS NV_API_CALL os_acquire_rwlock_read(void *pRwLock)
{
return NV_ERR_INVALID_REQUEST;
}
down_read(os_rwlock);
down_read(&os_rwlock->sem);
return NV_OK;
}

@@ -282,7 +311,7 @@ NV_STATUS NV_API_CALL os_acquire_rwlock_write(void *pRwLock)
{
return NV_ERR_INVALID_REQUEST;
}
down_write(os_rwlock);
down_write(&os_rwlock->sem);
return NV_OK;
}

@@ -290,7 +319,7 @@ NV_STATUS NV_API_CALL os_cond_acquire_rwlock_read(void *pRwLock)
{
os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;

if (down_read_trylock(os_rwlock))
if (down_read_trylock(&os_rwlock->sem))
{
return NV_ERR_TIMEOUT_RETRY;
}
@@ -302,7 +331,7 @@ NV_STATUS NV_API_CALL os_cond_acquire_rwlock_write(void *pRwLock)
{
os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;

if (down_write_trylock(os_rwlock))
if (down_write_trylock(&os_rwlock->sem))
{
return NV_ERR_TIMEOUT_RETRY;
}
@@ -313,13 +342,13 @@ NV_STATUS NV_API_CALL os_cond_acquire_rwlock_write(void *pRwLock)
void NV_API_CALL os_release_rwlock_read(void *pRwLock)
{
os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
up_read(os_rwlock);
up_read(&os_rwlock->sem);
}

void NV_API_CALL os_release_rwlock_write(void *pRwLock)
{
os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
up_write(os_rwlock);
up_write(&os_rwlock->sem);
}

NvBool NV_API_CALL os_semaphore_may_sleep(void)
@@ -343,11 +372,6 @@ NvBool NV_API_CALL os_allow_priority_override(void)
return capable(CAP_SYS_NICE);
}

NvU64 NV_API_CALL os_get_num_phys_pages(void)
{
return (NvU64)NV_NUM_PHYSPAGES;
}

char* NV_API_CALL os_string_copy(
char *dst,
const char *src
@@ -782,8 +806,6 @@ inline void NV_API_CALL out_string(const char *str)
printk("%s", str);
}

#define NV_PRINT_LOCAL_BUFF_LEN_MAX 530

/*
* nv_printf() prints to the kernel log for the driver.
* Returns the number of characters written.
@@ -792,38 +814,11 @@ int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...)
{
va_list arglist;
int chars_written = 0;
NvBool bForced = (NV_DBG_FORCE_LEVEL(debuglevel) == debuglevel);
debuglevel = debuglevel & 0xff;

// This function is protected by the "_nv_dbg_lock" lock, so it is still
// thread-safe to store the print buffer in a static variable, thus
// avoiding a problem with kernel stack size.
static char buff[NV_PRINT_LOCAL_BUFF_LEN_MAX];

/*
* Print a message if:
* 1. Caller indicates that filtering should be skipped, or
* 2. debuglevel is at least cur_debuglevel for DBG_MODULE_OS (bits 4:5). Support for print
* modules has been removed with DBG_PRINTF, so this check should be cleaned up.
*/
if (bForced ||
(debuglevel >= ((cur_debuglevel >> 4) & 0x3)))
if (debuglevel >= ((cur_debuglevel >> 4) & 0x3))
{
size_t loglevel_length = 0, format_length = strlen(printf_format);
size_t length = 0;
const char *loglevel = "";

switch (debuglevel)
{
case NV_DBG_INFO: loglevel = KERN_DEBUG; break;
case NV_DBG_SETUP: loglevel = KERN_NOTICE; break;
case NV_DBG_WARNINGS: loglevel = KERN_WARNING; break;
case NV_DBG_ERRORS: loglevel = KERN_ERR; break;
case NV_DBG_HW_ERRORS: loglevel = KERN_CRIT; break;
case NV_DBG_FATAL: loglevel = KERN_CRIT; break;
}

loglevel_length = strlen(loglevel);
size_t length;
unsigned long flags;

// When printk is called to extend the output of the previous line
// (i.e. when the previous line did not end in \n), the printk call
@@ -843,19 +838,22 @@ int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...)
// string always contains only one \n (at the end) and NV_PRINTF_EX
// is deleted. But that is unlikely to ever happen.

length = loglevel_length + format_length + sizeof(KERN_CONT);
length = strlen(printf_format);
if (length < 1)
return 0;

NV_SPIN_LOCK_IRQSAVE(&nv_error_string_lock, flags);

// KERN_CONT changed in the 3.6 kernel, so we can't assume its
// composition or size.
memcpy(buff, KERN_CONT, sizeof(KERN_CONT) - 1);
memcpy(buff + sizeof(KERN_CONT) - 1, loglevel, loglevel_length);
memcpy(buff + sizeof(KERN_CONT) - 1 + loglevel_length, printf_format, length + 1);
memcpy(nv_error_string, KERN_CONT, sizeof(KERN_CONT) - 1);
memcpy(nv_error_string + sizeof(KERN_CONT) - 1, printf_format, length + 1);

va_start(arglist, printf_format);
chars_written = vprintk(buff, arglist);
chars_written = vprintk(nv_error_string, arglist);
va_end(arglist);

NV_SPIN_UNLOCK_IRQRESTORE(&nv_error_string_lock, flags);
}

return chars_written;
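The rewritten nv_printf() path assembles KERN_CONT, a severity prefix, and the caller's format string into one static buffer before calling vprintk(). A standalone sketch of that assembly under the same size accounting (the helper name is hypothetical; the buffer mirrors NV_PRINT_LOCAL_BUFF_LEN_MAX and is assumed to be serialized by an external lock, as the comment above notes for _nv_dbg_lock):

#include <linux/printk.h>
#include <linux/string.h>

#define BUFF_LEN 530
static char buff[BUFF_LEN];

static int prefixed_vprintk(const char *loglevel, const char *fmt, va_list args)
{
    size_t ll = strlen(loglevel);
    size_t fl = strlen(fmt);

    /* sizeof(KERN_CONT) - 1 skips the literal's terminating NUL; the
     * final copy of fl + 1 bytes carries fmt's NUL into buff. */
    if (sizeof(KERN_CONT) - 1 + ll + fl + 1 > sizeof(buff))
        return 0;

    memcpy(buff, KERN_CONT, sizeof(KERN_CONT) - 1);
    memcpy(buff + sizeof(KERN_CONT) - 1, loglevel, ll);
    memcpy(buff + sizeof(KERN_CONT) - 1 + ll, fmt, fl + 1);

    return vprintk(buff, args);
}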
@@ -1007,26 +1005,29 @@ void NV_API_CALL os_unmap_kernel_space(
nv_iounmap(addr, size_bytes);
}

// flush the cpu's cache, uni-processor version
NV_STATUS NV_API_CALL os_flush_cpu_cache(void)
#if NVCPU_IS_AARCH64

static inline void nv_flush_cache_cpu(void *info)
{
CACHE_FLUSH();
return NV_OK;
if (!nvos_is_chipset_io_coherent())
{
#if defined(NV_FLUSH_CACHE_ALL_PRESENT)
flush_cache_all();
#else
WARN_ONCE(0, "kernel does not provide flush_cache_all()\n");
#endif
}
}

// flush the cache of all cpus
NV_STATUS NV_API_CALL os_flush_cpu_cache_all(void)
{
#if defined(NVCPU_AARCH64)
CACHE_FLUSH_ALL();
on_each_cpu(nv_flush_cache_cpu, NULL, 1);
return NV_OK;
#endif
return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL os_flush_user_cache(void)
{
#if defined(NVCPU_AARCH64)
if (!NV_MAY_SLEEP())
{
return NV_ERR_NOT_SUPPORTED;
@@ -1037,16 +1038,27 @@ NV_STATUS NV_API_CALL os_flush_user_cache(void)
// although it is possible. For now, just flush the entire cache to be
// safe.
//
CACHE_FLUSH_ALL();
on_each_cpu(nv_flush_cache_cpu, NULL, 1);
return NV_OK;
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}

#else // NVCPU_IS_AARCH64

NV_STATUS NV_API_CALL os_flush_cpu_cache_all(void)
{
return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL os_flush_user_cache(void)
{
return NV_ERR_NOT_SUPPORTED;
}

#endif
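The single-CPU os_flush_cpu_cache() is gone; the per-CPU callback nv_flush_cache_cpu() is instead fanned out with on_each_cpu(), which IPIs every online CPU and, with the last argument set to 1, blocks until all callbacks complete. A minimal sketch of the pattern, assuming a kernel that still provides flush_cache_all() (exactly what the NV_FLUSH_CACHE_ALL_PRESENT conftest above guards):

#include <linux/smp.h>
#include <asm/cacheflush.h>

static void flush_one_cpu(void *info)
{
    /* Runs on each CPU in turn, in atomic (IPI) context. */
    flush_cache_all();
}

static void flush_all_cpus(void)
{
    on_each_cpu(flush_one_cpu, NULL, 1);   /* 1 == wait for completion */
}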

void NV_API_CALL os_flush_cpu_write_combine_buffer(void)
{
WRITE_COMBINE_FLUSH();
wmb();
}
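wmb() is the portable write barrier behind this flush; on x86 it compiles to sfence, which also drains the CPU's write-combining buffers. A sketch of the usual motivation, with entirely hypothetical names (posting to a WC-mapped ring before ringing a doorbell):

#include <linux/io.h>
#include <asm/barrier.h>

static void post_work(u32 __iomem *wc_ring, u32 entry, u32 __iomem *doorbell)
{
    writel(entry, wc_ring);   /* may linger in a write-combine buffer */
    wmb();                    /* drain/order the WC stores ...        */
    writel(1, doorbell);      /* ... before the device is notified    */
}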
// override initial debug level from registry
@@ -1055,8 +1067,6 @@ void NV_API_CALL os_dbg_init(void)
NvU32 new_debuglevel;
nvidia_stack_t *sp = NULL;

NV_SPIN_LOCK_INIT(&nv_error_string_lock);

if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
return;
@@ -1082,7 +1092,7 @@ void NV_API_CALL os_dbg_set_level(NvU32 new_debuglevel)

NvU64 NV_API_CALL os_get_max_user_va(void)
{
return TASK_SIZE;
return TASK_SIZE;
}

NV_STATUS NV_API_CALL os_schedule(void)
@@ -1271,9 +1281,12 @@ void NV_API_CALL os_get_screen_info(
* SYSFB_SIMPLEFB registers a dummy framebuffer which does not contain the
* information required by os_get_screen_info(), so we need to
* fall back on the screen_info structure.
*
* After commit b8466fe82b79 ("efi: move screen_info into efi init code")
* in v6.7, 'screen_info' is exported as a GPL-licensed symbol for ARM64.
*/

#if NV_IS_EXPORT_SYMBOL_PRESENT_screen_info
#if NV_CHECK_EXPORT_SYMBOL(screen_info)
/*
* If there is not a framebuffer console, return 0 size.
*
@@ -1714,7 +1727,7 @@ NV_STATUS NV_API_CALL os_alloc_pages_node
* instead).
*
* 6. (Optional) __GFP_RECLAIM: Used to allow/forbid reclaim.
* This is part of GFP_USER and consequently
* This is part of GFP_USER and consequently
* GFP_HIGHUSER_MOVABLE.
*
* Some of these flags are relatively more recent, with the last of them
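For reference, a one-call sketch of an allocation built from the flags this comment walks through: GFP_HIGHUSER_MOVABLE already contains __GFP_RECLAIM via GFP_USER, and __GFP_THISNODE pins the allocation to the requested node (the helper is illustrative, not driver code):

#include <linux/gfp.h>

static struct page *alloc_page_on_node(int nid)
{
    /* order 0 == a single page */
    return alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}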
@@ -2330,6 +2343,37 @@ NV_STATUS NV_API_CALL os_numa_add_gpu_memory
goto failed;
}

/*
* On systems with the cpuset cgroup controller enabled, memory
* allocation on this just-hotplugged GPU memory node can fail if
* cpuset_hotplug_work has not been scheduled yet. cpuset_hotplug_work
* is where current->mems_allowed is updated, in the path
* cpuset_hotplug_workfn->update_tasks_nodemask. When cpuset is
* enabled and current->mems_allowed has not been updated, memory
* allocation with __GFP_THISNODE and this node id fails. The
* cpuset_wait_for_hotplug kernel function could be used to wait for
* the work to finish, but it is not exported. As a WAR, poll for
* current->mems_allowed to be updated in a timed loop while an
* upstream kernel fix is being explored. Bug 4385903
*/
if (!node_isset(node, cpuset_current_mems_allowed))
{
unsigned long delay;

delay = jiffies + (HZ / 10); // 100ms
while (time_before(jiffies, delay) &&
!node_isset(node, cpuset_current_mems_allowed))
{
os_schedule();
}

if (!node_isset(node, cpuset_current_mems_allowed))
{
nv_printf(NV_DBG_ERRORS, "NVRM: Hotplugged GPU memory NUMA node: %d "
"not set in current->mems_allowed!\n", node);
}
}

*nodeId = node;
clear_bit(node, nvl->coherent_link_info.free_node_bitmap);
NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_ONLINE);
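The WAR above is a bounded poll: retry for at most 100 ms, yielding between checks, then proceed (with an error print) either way. The same pattern distilled into a generic sketch, assuming kernel process context; os_schedule() above presumably wraps schedule():

#include <linux/jiffies.h>
#include <linux/sched.h>

static bool poll_briefly(bool (*cond)(void))
{
    unsigned long deadline = jiffies + (HZ / 10);   /* 100 ms */

    while (time_before(jiffies, deadline) && !cond())
        schedule();                                 /* yield, don't busy-spin */

    return cond();   /* caller logs and carries on even if still false */
}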
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -243,7 +243,7 @@ NV_STATUS NV_API_CALL os_lock_user_pages(

nv_mmap_read_lock(mm);
ret = NV_PIN_USER_PAGES((unsigned long)address,
page_count, gup_flags, user_pages, NULL);
page_count, gup_flags, user_pages);
nv_mmap_read_unlock(mm);
pinned = ret;
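This call-site change tracks an upstream API change: pin_user_pages() lost its struct vm_area_struct **vmas parameter in kernel v6.5, so the trailing NULL disappears. NV_PIN_USER_PAGES presumably hides the two signatures behind a conftest check; a sketch of that kind of shim (the guard macro name is an assumption):

#if defined(NV_PIN_USER_PAGES_HAS_VMAS_ARG)
#define NV_PIN_USER_PAGES(start, nr, flags, pages) \
    pin_user_pages(start, nr, flags, pages, NULL)
#else
#define NV_PIN_USER_PAGES(start, nr, flags, pages) \
    pin_user_pages(start, nr, flags, pages)
#endif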