mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git
Commit: 580.65.06
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a

@@ -1,6 +1,6 @@
 /**
  * Copyright Notice:
- * Copyright 2021-2022 DMTF. All rights reserved.
+ * Copyright 2021-2024 DMTF. All rights reserved.
  * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
  **/

@@ -35,10 +35,10 @@ extern bool libspdm_asn1_get_tag(uint8_t **ptr, const uint8_t *end, size_t *leng
  * @param[in, out] subject_size  The size in bytes of the cert_subject buffer on input,
  *                               and the size of buffer returned cert_subject on output.
  *
- * @retval true   The certificate subject retrieved successfully.
- * @retval false  Invalid certificate, or the subject_size is too small for the result.
- *                The subject_size will be updated with the required size.
- * @retval false  This interface is not supported.
+ * @retval true   If the subject_size is not equal 0. The certificate subject retrieved successfully.
+ * @retval true   If the subject_size is equal 0. The certificate parse successful. But the cert doesn't have subject.
+ * @retval false  If the subject_size is not equal 0. The certificate subject retrieved successfully. But the subject_size is too small for the result.
+ * @retval false  If the subject_size is equal 0. Invalid certificate.
 **/
 extern bool libspdm_x509_get_subject_name(const uint8_t *cert, size_t cert_size,
                                           uint8_t *cert_subject,
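[Editor's note] The new @retval convention above overloads the boolean result with the value left in subject_size, so callers must inspect both. A minimal caller sketch, not part of the commit (the buffer size and function name are illustrative, and it assumes this header is included):

    static bool example_get_subject(const uint8_t *cert, size_t cert_size)
    {
        uint8_t subject[256];
        size_t subject_size = sizeof(subject);

        if (!libspdm_x509_get_subject_name(cert, cert_size, subject, &subject_size)) {
            /* subject_size == 0: invalid certificate.
             * subject_size != 0: buffer too small; subject_size now holds the
             * required size, so the call can be retried with a larger buffer. */
            return false;
        }
        /* subject_size == 0 here means the certificate parsed but has no subject. */
        return true;
    }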
@@ -80,6 +80,25 @@ extern bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size
                                            uint8_t *serial_number,
                                            size_t *serial_number_size);

+#if LIBSPDM_ADDITIONAL_CHECK_CERT
+/**
+ * Retrieve the signature algorithm from one X.509 certificate.
+ *
+ * @param[in]     cert       Pointer to the DER-encoded X509 certificate.
+ * @param[in]     cert_size  Size of the X509 certificate in bytes.
+ * @param[out]    oid        Signature algorithm Object identifier buffer.
+ * @param[in,out] oid_size   Signature algorithm Object identifier buffer size.
+ *
+ * @retval true   if the oid_size is equal 0, the cert parse successfully, but cert doesn't have signature algo.
+ * @retval true   if the oid_size is not equal 0, the cert parse and get signature algo successfully.
+ * @retval false  if the oid_size is equal 0, the cert parse failed.
+ * @retval false  if the oid_size is not equal 0, the cert parse and get signature algo successfully, but the input buffer size is small.
+ **/
+extern bool libspdm_x509_get_signature_algorithm(const uint8_t *cert,
+                                                 size_t cert_size, uint8_t *oid,
+                                                 size_t *oid_size);
+#endif /* LIBSPDM_ADDITIONAL_CHECK_CERT */
+
 /**
  * Retrieve the issuer bytes from one X.509 certificate.
  *
@@ -93,10 +112,10 @@ extern bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size
  * @param[in, out] issuer_size  The size in bytes of the cert_issuer buffer on input,
  *                              and the size of buffer returned cert_issuer on output.
  *
- * @retval true   The certificate issuer retrieved successfully.
- * @retval false  Invalid certificate, or the issuer_size is too small for the result.
- *                The issuer_size will be updated with the required size.
- * @retval false  This interface is not supported.
+ * @retval true   If the issuer_size is not equal 0. The certificate issuer retrieved successfully.
+ * @retval true   If the issuer_size is equal 0. The certificate parse successful. But the cert doesn't have issuer.
+ * @retval false  If the issuer_size is not equal 0. The certificate issuer retrieved successfully. But the issuer_size is too small for the result.
+ * @retval false  If the issuer_size is equal 0. Invalid certificate.
 **/
 extern bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
                                          uint8_t *cert_issuer,
@@ -112,8 +131,11 @@ extern bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
  * @param[out]     extension_data       Extension bytes.
  * @param[in, out] extension_data_size  Extension bytes size.
  *
- * @retval true
- * @retval false
+ * @retval true   If the returned extension_data_size == 0, it means that cert and oid are valid, but the oid extension is not found;
+ *                If the returned extension_data_size != 0, it means that cert and oid are valid, and the oid extension is found;
+ * @retval false  If the returned extension_data_size == 0, it means that cert or oid are invalid;
+ *                If the returned extension_data_size != 0, it means that cert and oid are valid, and the oid extension is found,
+ *                but the store buffer is too small.
 **/
 extern bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_size,
                                             const uint8_t *oid, size_t oid_size,
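[Editor's note] The same size-disambiguation pattern applies to extensions. A hedged sketch, not part of the commit (the OID bytes encode 2.5.29.37, id-ce-extKeyUsage, purely as an illustration):

    static bool example_get_extension(const uint8_t *cert, size_t cert_size)
    {
        static const uint8_t oid[] = { 0x55, 0x1d, 0x25 };
        uint8_t data[128];
        size_t data_size = sizeof(data);

        if (!libspdm_x509_get_extension_data(cert, cert_size, oid, sizeof(oid),
                                             data, &data_size)) {
            /* data_size == 0: cert or oid invalid;
             * data_size != 0: extension found but the buffer is too small. */
            return false;
        }
        return (data_size != 0); /* 0 means a valid cert with the extension absent */
    }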
@@ -137,9 +159,14 @@ extern bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_siz
  * Note: libspdm_x509_compare_date_time to compare date_time object
  *       x509SetDateTime to get a date_time object from a date_time_str
  *
- * @retval true   The certificate Validity retrieved successfully.
- * @retval false  Invalid certificate, or Validity retrieve failed.
- * @retval false  This interface is not supported.
+ * @retval true   if the from_size and to_size are not equal 0.
+ *                The certificate Validity retrieved successfully.
+ * @retval true   if the from_size and to_size are equal 0.
+ *                The certificate Validity does not exist.
+ * @retval false  if the from_size and to_size are not equal 0.
+ *                The certificate Validity retrieved successfully, but the input buffer size is small.
+ * @retval false  if the from_size and to_size are equal 0.
+ *                Invalid certificate, or Validity retrieve failed.
 **/
 extern bool libspdm_x509_get_validity(const uint8_t *cert, size_t cert_size,
                                       uint8_t *from, size_t *from_size, uint8_t *to,
@@ -189,9 +216,9 @@ extern int32_t libspdm_x509_compare_date_time(const void *date_time1, const void
  * @param[in]  cert_size  Size of the X509 certificate in bytes.
  * @param[out] usage      Key usage (LIBSPDM_CRYPTO_X509_KU_*)
  *
- * @retval true   The certificate key usage retrieved successfully.
- * @retval false  Invalid certificate, or usage is NULL
- * @retval false  This interface is not supported.
+ * @retval true   if the usage is not equal 0. The certificate key usage retrieved successfully.
+ * @retval true   if the usage is equal 0. The certificate parse successfully, but the cert doesn't have key usage.
+ * @retval false  Invalid certificate, or usage is NULL.
 **/
 extern bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size, size_t *usage);

@@ -203,8 +230,11 @@ extern bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size, si
  * @param[out]     usage       Key usage bytes.
  * @param[in, out] usage_size  Key usage buffer size in bytes.
  *
- * @retval true
- * @retval false
+ * @retval true   If the returned usage_size == 0, it means that cert and oid are valid, but the Extended key usage is not found;
+ *                If the returned usage_size != 0, it means that cert and oid are valid, and the Extended key usage is found;
+ * @retval false  If the returned usage_size == 0, it means that cert or oid are invalid;
+ *                If the returned usage_size != 0, it means that cert and oid are valid, and the Extended key usage is found,
+ *                but the store buffer is too small.
 **/
 extern bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
                                                 size_t cert_size, uint8_t *usage,
@@ -218,8 +248,11 @@ extern bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
  * @param[out]     basic_constraints       Basic constraints bytes.
  * @param[in, out] basic_constraints_size  Basic constraints buffer size in bytes.
  *
- * @retval true
- * @retval false
+ * @retval true   If the returned basic_constraints_size == 0, it means that cert and oid are valid, but the basic_constraints is not found;
+ *                If the returned basic_constraints_size != 0, it means that cert and oid are valid, and the basic_constraints is found;
+ * @retval false  If the returned basic_constraints_size == 0, it means that cert or oid are invalid;
+ *                If the returned basic_constraints_size != 0, it means that cert and oid are valid, and the basic_constraints is found,
+ *                but the store buffer is too small.
 **/
 extern bool libspdm_x509_get_extended_basic_constraints(const uint8_t *cert,
                                                         size_t cert_size,

@@ -1,6 +1,6 @@
 /**
  * Copyright Notice:
- * Copyright 2021-2022 DMTF. All rights reserved.
+ * Copyright 2021-2024 DMTF. All rights reserved.
  * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
  **/

@@ -21,14 +21,6 @@
 #define LIBSPDM_DEBUG_ASSERT_ENABLE (LIBSPDM_DEBUG_ENABLE)
 #define LIBSPDM_DEBUG_PRINT_ENABLE (LIBSPDM_DEBUG_ENABLE)
 #define LIBSPDM_DEBUG_BLOCK_ENABLE (LIBSPDM_DEBUG_ENABLE)
-#elif defined(MDEPKG_NDEBUG)
-#undef LIBSPDM_DEBUG_ASSERT_ENABLE
-#undef LIBSPDM_DEBUG_PRINT_ENABLE
-#undef LIBSPDM_DEBUG_BLOCK_ENABLE
-
-#define LIBSPDM_DEBUG_ASSERT_ENABLE 0
-#define LIBSPDM_DEBUG_PRINT_ENABLE 0
-#define LIBSPDM_DEBUG_BLOCK_ENABLE 0
 #endif /* defined(LIBSPDM_DEBUG_ENABLE) */

 /*when in FIPS mode, only support approved algo in FIPS */

@@ -59,15 +59,15 @@
 #define LIBSPDM_CRYPTO_NID_SM4_128_GCM 0x0304

 /* X.509 v3 key usage extension flags. */
-#define LIBSPDM_CRYPTO_X509_KU_DIGITAL_SIGNATURE 0x80 /* bit 0 */
-#define LIBSPDM_CRYPTO_X509_KU_NON_REPUDIATION 0x40 /* bit 1 */
-#define LIBSPDM_CRYPTO_X509_KU_KEY_ENCIPHERMENT 0x20 /* bit 2 */
-#define LIBSPDM_CRYPTO_X509_KU_DATA_ENCIPHERMENT 0x10 /* bit 3 */
-#define LIBSPDM_CRYPTO_X509_KU_KEY_AGREEMENT 0x08 /* bit 4 */
-#define LIBSPDM_CRYPTO_X509_KU_KEY_CERT_SIGN 0x04 /* bit 5 */
-#define LIBSPDM_CRYPTO_X509_KU_CRL_SIGN 0x02 /* bit 6 */
-#define LIBSPDM_CRYPTO_X509_KU_ENCIPHER_ONLY 0x01 /* bit 7 */
-#define LIBSPDM_CRYPTO_X509_KU_DECIPHER_ONLY 0x8000 /* bit 8 */
+#define LIBSPDM_CRYPTO_X509_KU_DIGITAL_SIGNATURE 0x80
+#define LIBSPDM_CRYPTO_X509_KU_NON_REPUDIATION 0x40
+#define LIBSPDM_CRYPTO_X509_KU_KEY_ENCIPHERMENT 0x20
+#define LIBSPDM_CRYPTO_X509_KU_DATA_ENCIPHERMENT 0x10
+#define LIBSPDM_CRYPTO_X509_KU_KEY_AGREEMENT 0x08
+#define LIBSPDM_CRYPTO_X509_KU_KEY_CERT_SIGN 0x04
+#define LIBSPDM_CRYPTO_X509_KU_CRL_SIGN 0x02
+#define LIBSPDM_CRYPTO_X509_KU_ENCIPHER_ONLY 0x01
+#define LIBSPDM_CRYPTO_X509_KU_DECIPHER_ONLY 0x8000

 /* These constants comply with the DER encoded ASN.1 type tags. */
 #define LIBSPDM_CRYPTO_ASN1_BOOLEAN 0x01

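[Editor's note] The flag values themselves are unchanged by this hunk; only the bit-number comments are dropped (DECIPHER_ONLY at 0x8000 never matched its "bit 8" label). A small sketch of how the flags combine with libspdm_x509_get_key_usage(), declared earlier; illustrative only, not part of the commit:

    static bool example_requires_digital_signature(const uint8_t *cert, size_t cert_size)
    {
        size_t usage = 0;

        if (!libspdm_x509_get_key_usage(cert, cert_size, &usage)) {
            return false; /* invalid certificate or usage is NULL */
        }
        if (usage == 0) {
            return true;  /* parsed, but no key usage extension present */
        }
        return (usage & LIBSPDM_CRYPTO_X509_KU_DIGITAL_SIGNATURE) != 0;
    }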
@@ -1,12 +1,135 @@
 /**
  * Copyright Notice:
- * Copyright 2021-2022 DMTF. All rights reserved.
+ * Copyright 2021-2024 DMTF. All rights reserved.
  * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
  **/

 #ifndef SPDM_LIB_CONFIG_H
 #define SPDM_LIB_CONFIG_H

+/* Code space optimization for optional messages.
+ *
+ * An Integrator of libspdm may not need all of the optional SPDM messages. The
+ * LIBSPDM_ENABLE_CAPABILITY_***_CAP compile time switches allow the Integrator to enable or disable
+ * capabilities and messages.
+ */
+
+/* SPDM 1.0 capabilities and messages. */
+#ifndef LIBSPDM_ENABLE_CAPABILITY_CERT_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_CERT_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_VENDOR_DEFINED_MESSAGES
+#define LIBSPDM_ENABLE_VENDOR_DEFINED_MESSAGES 1
+#endif
+
+/* SPDM 1.1 capabilities. */
+#ifndef LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_PSK_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_PSK_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP 1
+#endif
+
+/* SPDM 1.2 capabilities. */
+#ifndef LIBSPDM_ENABLE_CAPABILITY_CSR_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_CSR_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP 1
+#endif
+
+/* SPDM 1.3 capabilities. */
+#ifndef LIBSPDM_ENABLE_CAPABILITY_MEL_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_MEL_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_EVENT_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_EVENT_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_GET_KEY_PAIR_INFO_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_GET_KEY_PAIR_INFO_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_SET_KEY_PAIR_INFO_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_SET_KEY_PAIR_INFO_CAP 1
+#endif
+
+/* Includes SPDM 1.3 features for CSR messages. If enabled then LIBSPDM_ENABLE_CAPABILITY_CSR_CAP
+ * must also be enabled.
+ */
+#ifndef LIBSPDM_ENABLE_CAPABILITY_CSR_CAP_EX
+#define LIBSPDM_ENABLE_CAPABILITY_CSR_CAP_EX 1
+#endif
+
+/* If 1 then endpoint supports sending GET_CERTIFICATE and GET_DIGESTS requests.
+ * If enabled and endpoint is a Responder then LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
+ * must also be enabled.
+ */
+#ifndef LIBSPDM_SEND_GET_CERTIFICATE_SUPPORT
+#define LIBSPDM_SEND_GET_CERTIFICATE_SUPPORT 1
+#endif
+
+/* If 1 then endpoint supports sending CHALLENGE request.
+ * If enabled and endpoint is a Responder then LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
+ * must also be enabled.
+ */
+#ifndef LIBSPDM_SEND_CHALLENGE_SUPPORT
+#define LIBSPDM_SEND_CHALLENGE_SUPPORT 1
+#endif
+
+/* If 1 then endpoint supports sending the GET_SUPPORTED_EVENT_TYPES, SUBSCRIBE_EVENT_TYPES, and
+ * encapsulated EVENT_ACK messages. In addition, LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP must also be
+ * 1.
+ */
+#ifndef LIBSPDM_EVENT_RECIPIENT_SUPPORT
+#define LIBSPDM_EVENT_RECIPIENT_SUPPORT 1
+#endif
+
+/* When LIBSPDM_RESPOND_IF_READY_SUPPORT is 0 then
+ * - For a Requester, if the Responder sends a ResponseNotReady ERROR response then the error
+ *   is immediately returned to the Integrator. The Requester cannot send a RESPOND_IF_READY
+ *   request.
+ * - For a Responder, it cannot send a RESPOND_IF_READY ERROR response and does not support
+ *   RESPOND_IF_READY.
+ * When LIBSPDM_RESPOND_IF_READY_SUPPORT is 1 then
+ * - For a Requester, if the Responder sends a ResponseNotReady ERROR response then libspdm
+ *   waits an amount of time, as specified by the RDTExponent parameter, before sending
+ *   RESPOND_IF_READY.
+ * - For a Responder, if its response state is NOT_READY then it will send a ResponseNotReady
+ *   ERROR response to the Requester, and will accept a subsequent RESPOND_IF_READY request.
+ */
+#ifndef LIBSPDM_RESPOND_IF_READY_SUPPORT
+#define LIBSPDM_RESPOND_IF_READY_SUPPORT 1
+#endif
+
 /* Enables FIPS 140-3 mode. */
 #ifndef LIBSPDM_FIPS_MODE
 #define LIBSPDM_FIPS_MODE 0
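[Editor's note] Every default above is wrapped in #ifndef, so an Integrator can override it from the build system or from a custom config header (typically selected with libspdm's LIBSPDM_CONFIG define). A hedged sketch of such an override header; the file name and the choice of trimmed capabilities are illustrative, not part of the commit:

    /* my_spdm_config.h -- built with -DLIBSPDM_CONFIG='"my_spdm_config.h"'.
     * Trims a Requester-only build; per the comments above, CSR_CAP_EX must be
     * off whenever CSR_CAP is off. */
    #define LIBSPDM_ENABLE_CAPABILITY_CSR_CAP 0
    #define LIBSPDM_ENABLE_CAPABILITY_CSR_CAP_EX 0
    #define LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP 0
    #define LIBSPDM_ENABLE_CAPABILITY_SET_KEY_PAIR_INFO_CAP 0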
@@ -23,7 +146,7 @@
 #define LIBSPDM_DEBUG_ENABLE 1
 #endif

-/* The SPDM specification allows a Responder to return up to 256 version entries in the `VERSION`
+/* The SPDM specification allows a Responder to return up to 255 version entries in the `VERSION`
  * response to the Requester, including duplicate entries. For a Requester this value specifies the
  * maximum number of entries that libspdm will tolerate in a `VERSION` response before returning an
  * error. A similar macro, `SPDM_MAX_VERSION_COUNT`, exists for the Responder. However this macro
@@ -33,6 +156,7 @@
 #define LIBSPDM_MAX_VERSION_COUNT 5
 #endif

+#if LIBSPDM_ENABLE_CAPABILITY_PSK_CAP
 /* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.RequesterContext` and,
  * if supported by the Responder, `PSK_EXCHANGE_RSP.ResponderContext` fields. The fields are
  * typically random or monotonically increasing numbers.
@@ -40,10 +164,12 @@
 #ifndef LIBSPDM_PSK_CONTEXT_LENGTH
 #define LIBSPDM_PSK_CONTEXT_LENGTH LIBSPDM_MAX_HASH_SIZE
 #endif
-/* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.PSKHint` field.*/
+
+/* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.PSKHint` field. */
 #ifndef LIBSPDM_PSK_MAX_HINT_LENGTH
 #define LIBSPDM_PSK_MAX_HINT_LENGTH 16
 #endif
+#endif /* LIBSPDM_ENABLE_CAPABILITY_PSK_CAP */

 /* libspdm allows an Integrator to specify multiple root certificates as trust anchors when
  * verifying certificate chains from an endpoint. This value specifies the maximum number of root
@@ -59,15 +185,18 @@
 #ifndef LIBSPDM_MAX_SESSION_COUNT
 #define LIBSPDM_MAX_SESSION_COUNT 4
 #endif

+/* This value specifies the maximum size, in bytes, of a certificate chain that can be stored in a
+ * libspdm context.
+ */
 #ifndef LIBSPDM_MAX_CERT_CHAIN_SIZE
 #define LIBSPDM_MAX_CERT_CHAIN_SIZE 0x1000
 #endif

 #ifndef LIBSPDM_MAX_MEASUREMENT_RECORD_SIZE
 #define LIBSPDM_MAX_MEASUREMENT_RECORD_SIZE 0x1000
 #endif

 /* Partial certificates can be retrieved from a Requester or Responder and through multiple messages
  * the complete certificate chain can be constructed. This value specifies the maximum size,
  * in bytes, of a partial certificate that can be sent or received.
@@ -76,11 +205,24 @@
 #define LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN 1024
 #endif

+/* Partial measurement extension log (MEL) can be retrieved from a Responder and through multiple
+ * messages the complete MEL can be constructed. This value specifies the maximum size, in bytes, of
+ * a partial MEL that can be sent or received.
+ */
+#ifndef LIBSPDM_MAX_MEL_BLOCK_LEN
+#define LIBSPDM_MAX_MEL_BLOCK_LEN 1024
+#endif
+
 /* To ensure integrity in communication between the Requester and the Responder libspdm calculates
  * cryptographic digests and signatures over multiple requests and responses. This value specifies
  * whether libspdm will use a running calculation over the transcript, where requests and responses
  * are discarded as they are cryptographically consumed, or whether libspdm will buffer the entire
  * transcript before calculating the digest or signature.
+ *
+ * When LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT is 0 then a running calculation is used and less
+ * memory is needed.
+ * When LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT is 1 then the entire transcript is buffered and more
+ * memory is needed.
  */
 #ifndef LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT
 #define LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT 0
@@ -194,112 +336,12 @@
 #define LIBSPDM_SM3_256_SUPPORT 1
 #endif

-/* This can be set to 0 for the device which does not need X509 parser.*/
+/* If 1 then endpoint supports parsing X.509 certificate chains. */
 #ifndef LIBSPDM_CERT_PARSE_SUPPORT
 #define LIBSPDM_CERT_PARSE_SUPPORT 1
 #endif

-/* Code space optimization for Optional request/response messages.*/
-
-/* Consumers of libspdm may wish to not fully implement all of the optional
- * SPDM request/response messages. Therefore we have provided these
- * SPDM_ENABLE_CAPABILITY_***_CAP compile time switches as an optimization
- * disable the code (#if 0) related to said optional capability, thereby
- * reducing the code space used in the image.*/
-
-/* A single switch may enable/disable a single capability or group of related
- * capabilities.*/
-
-/* LIBSPDM_ENABLE_CAPABILITY_CERT_CAP - Enable/Disable single CERT capability.
- * LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP - Enable/Disable single CHAL capability.
- * LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP - Enable/Disables multiple MEAS capabilities:
- * (MEAS_CAP_NO_SIG, MEAS_CAP_SIG, MEAS_FRESH_CAP)*/
-
-/* LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP - Enable/Disable single Key Exchange capability.
- * LIBSPDM_ENABLE_CAPABILITY_PSK_CAP - Enable/Disable PSK_EX and PSK_FINISH.*/
-
-/* LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP - Enable/Disable mutual authentication.
- * LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP - Enable/Disable encapsulated message.*/
-
-/* LIBSPDM_ENABLE_CAPABILITY_CSR_CAP - Enable/Disable get csr capability.
- * LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP - Enable/Disable set certificate capability. */
-
-#ifndef LIBSPDM_ENABLE_CAPABILITY_CERT_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_CERT_CAP 1
-#endif
-
-#ifndef LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP 1
-#endif
-
-#ifndef LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP 1
-#endif
-
-#ifndef LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP 1
-#endif
-
-#ifndef LIBSPDM_ENABLE_CAPABILITY_PSK_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_PSK_CAP 1
-#endif
-
-#ifndef LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP 1
-#endif
-
-#ifndef LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP 1
-#endif
-
-#ifndef LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP 1
-#endif
-
-#ifndef LIBSPDM_ENABLE_CAPABILITY_CSR_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_CSR_CAP 1
-#endif
-
-#ifndef LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP 1
-#endif
-
-#ifndef LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
-#define LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP 1
-#endif
-
-/* If 1 then endpoint supports sending GET_CERTIFICATE and GET_DIGESTS requests.
- * If enabled and endpoint is a Responder then LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
- * must also be enabled.
- */
-#ifndef LIBSPDM_SEND_GET_CERTIFICATE_SUPPORT
-#define LIBSPDM_SEND_GET_CERTIFICATE_SUPPORT 1
-#endif
-
-/* If 1 then endpoint supports sending CHALLENGE request.
- * If enabled and endpoint is a Responder then LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
- * must also be enabled.
- */
-#ifndef LIBSPDM_SEND_CHALLENGE_SUPPORT
-#define LIBSPDM_SEND_CHALLENGE_SUPPORT 1
-#endif
-
-/* When LIBSPDM_RESPOND_IF_READY_SUPPORT is 0 then
- * - For a Requester, if the Responder sends a ResponseNotReady ERROR response then the error
- *   is immediately returned to the Integrator. The Requester cannot send a RESPOND_IF_READY
- *   request.
- * - For a Responder, it cannot send a RESPOND_IF_READY ERROR response and does not support
- *   RESPOND_IF_READY.
- * When LIBSPDM_RESPOND_IF_READY_SUPPORT is 1 then
- * - For a Requester, if the Responder sends a ResponseNotReady ERROR response then libspdm
- *   waits an amount of time, as specified by the RDTExponent parameter, before sending
- *   RESPOND_IF_READY.
- * - For a Responder, if its response state is NOT_READY then it will send a ResponseNotReady
- *   ERROR response to the Requester, and will accept a subsequent RESPOND_IF_READY request.
- */
-#ifndef LIBSPDM_RESPOND_IF_READY_SUPPORT
-#define LIBSPDM_RESPOND_IF_READY_SUPPORT 1
-#endif
-
 /*
  * MinDataTransferSize = 42
@@ -368,7 +410,8 @@

 /* Enable message logging.
  * See https://github.com/DMTF/libspdm/blob/main/doc/user_guide.md#message-logging
- * for more information */
+ * for more information.
+ */
 #ifndef LIBSPDM_ENABLE_MSG_LOG
 #define LIBSPDM_ENABLE_MSG_LOG 1
 #endif
@@ -378,9 +421,25 @@
 #define LIBSPDM_CHECK_MACRO 0
 #endif

-/* Enable checks to the SPDM context during runtime. */
+/* Enable compilation of libspdm_check_context function. After a libspdm context has been
+ * configured libspdm_check_context can be called to check that its configuration is correct.
+ */
 #ifndef LIBSPDM_CHECK_SPDM_CONTEXT
 #define LIBSPDM_CHECK_SPDM_CONTEXT 1
 #endif

+/* Enable passing the SPDM context to HAL functions.
+ * This macro will be removed when libspdm 4.0 is released.
+ */
+#ifndef LIBSPDM_HAL_PASS_SPDM_CONTEXT
+#define LIBSPDM_HAL_PASS_SPDM_CONTEXT 0
+#endif
+
+/* Enable additional checks for certificates.
+ * This macro will be removed when libspdm 4.0 is released.
+ */
+#ifndef LIBSPDM_ADDITIONAL_CHECK_CERT
+#define LIBSPDM_ADDITIONAL_CHECK_CERT 0
+#endif
+
 #endif /* SPDM_LIB_CONFIG_H */

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a

@@ -55,6 +55,232 @@ void libspdm_x509_stack_free(void *x509_stack)
     LIBSPDM_ASSERT(false);
 }

+#ifdef USE_LKCA
+bool libspdm_encode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
+{
+    static const uint8_t base64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+    size_t i;
+    size_t tmp;
+    size_t size;
+    uint8_t *ptr = dst;
+
+    for (i = 0; (i + 2) < srclen; i += 3)
+    {
+        if (ptr - dst + 4 > *p_dstlen)
+        {
+            goto Exit;
+        }
+        tmp = (src[i] << 16) | (src[i+1] << 8) | (src[i+2]);
+        *ptr++ = base64[(tmp >> 18) & 63];
+        *ptr++ = base64[(tmp >> 12) & 63];
+        *ptr++ = base64[(tmp >> 6) & 63];
+        *ptr++ = base64[tmp & 63];
+    }
+
+    // 1 byte extra
+    if (i == srclen - 1)
+    {
+        if (ptr - dst + 4 > *p_dstlen)
+        {
+            goto Exit;
+        }
+        tmp = src[i] << 4;
+        *ptr++ = base64[(tmp >> 6) & 63];
+        *ptr++ = base64[tmp & 63];
+        *ptr++ = '=';
+        *ptr++ = '=';
+    }
+
+    // 2 byte extra
+    if (i == srclen - 2)
+    {
+        if (ptr - dst + 4 > *p_dstlen)
+        {
+            goto Exit;
+        }
+        tmp = ((src[i] << 8) | (src[i+1])) << 2;
+        *ptr++ = base64[(tmp >> 12) & 63];
+        *ptr++ = base64[(tmp >> 6) & 63];
+        *ptr++ = base64[tmp & 63];
+        *ptr++ = '=';
+    }
+
+    *p_dstlen = ptr - dst;
+    return true;
+Exit:
+    *p_dstlen = 0;
+    return false;
+}
+
+typedef enum {
+    BASE64_CONV_VALID,
+    BASE64_CONV_PAD,
+    BASE64_CONV_INVALID
+} BASE64_CONV;
+
+static BASE64_CONV libspdm_decode_base64_chr(uint8_t b64_chr, uint8_t *value)
+{
+    if (b64_chr >= 'A' && b64_chr <= 'Z')
+    {
+        *value = b64_chr - 'A';
+    }
+    else if (b64_chr >= 'a' && b64_chr <= 'z')
+    {
+        *value = b64_chr - 'a' + 26;
+    }
+    else if (b64_chr >= '0' && b64_chr <= '9')
+    {
+        *value = b64_chr - '0' + 52;
+    }
+    else if (b64_chr == '+' || b64_chr == '-')
+    {
+        *value = 62;
+    }
+    else if (b64_chr == '/' || b64_chr == '_')
+    {
+        *value = 63;
+    }
+    else if (b64_chr == '=')
+    {
+        *value = 0;
+        return BASE64_CONV_PAD;
+    }
+    else
+    {
+        return BASE64_CONV_INVALID;
+    }
+
+    return BASE64_CONV_VALID;
+}
+
+static bool libspdm_decode_base64_stripped(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
+{
+    const uint8_t *p_read;
+    uint8_t *p_write;
+    uint8_t i;
+    uint8_t bytes;
+    uint32_t bin_value;
+    uint8_t char_value;
+
+    if (src == NULL || dst == NULL || srclen % 4 != 0)
+    {
+        return false;
+    }
+    for (p_read = src, p_write = dst; p_read < src + srclen; p_read += 4)
+    {
+        for (i = 0, bytes = 3, bin_value = 0; i < 4; i++)
+        {
+            if (libspdm_decode_base64_chr(p_read[i], &char_value) == BASE64_CONV_PAD)
+            {
+                bytes--;
+                // fallthrough
+                bin_value <<= 6;
+                bin_value |= char_value;
+            }
+            else if (libspdm_decode_base64_chr(p_read[i], &char_value) == BASE64_CONV_VALID)
+            {
+                bin_value <<= 6;
+                bin_value |= char_value;
+            }
+            else
+            {
+                // attempting to decode an invalid character
+                goto Exit;
+            }
+        }
+
+        if (p_write - dst + bytes > *p_dstlen)
+        {
+            // buffer too small
+            goto Exit;
+        }
+
+        switch (bytes)
+        {
+            case 3:
+                *p_write++ = (bin_value & 0x00ff0000) >> 16;
+                *p_write++ = (bin_value & 0x0000ff00) >> 8;
+                *p_write++ = (bin_value & 0x000000ff);
+                break;
+            case 2:
+                *p_write++ = (bin_value & 0x00ff0000) >> 16;
+                *p_write++ = (bin_value & 0x0000ff00) >> 8;
+                break;
+            case 1:
+                *p_write++ = (bin_value & 0x00ff0000) >> 16;
+                break;
+            default:
+                // invalid state in base64
+                goto Exit;
+        }
+    }
+    *p_dstlen = p_write - dst;
+    return true;
+Exit:
+    *p_dstlen = 0;
+    return false;
+}
+
+bool libspdm_decode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
+{
+    size_t s_progress;
+    size_t d_progress;
+    size_t decode_size;
+    size_t decoded_size;
+
+    // for each round we decode 64 bytes and skip the linebreaks
+    for (s_progress = d_progress = 0; s_progress < srclen; s_progress += 65)
+    {
+        if (s_progress + 65 < srclen)
+        {
+            decode_size = 64;
+        }
+        else
+        {
+            // -1 to avoid decoding the '\n' byte in the end
+            decode_size = srclen - s_progress - 1;
+        }
+        // calculate the size after decoding
+        decoded_size = (decode_size / 4) * 3;
+        if (src[decode_size - 1] == '=')
+        {
+            decoded_size--;
+        }
+        if (src[decoded_size - 2] == '=')
+        {
+            decoded_size--;
+        }
+        // break early if the buffer is too small
+        if (*p_dstlen - d_progress < decoded_size)
+        {
+            break;
+        }
+        if (!libspdm_decode_base64_stripped(src + s_progress, dst + d_progress, decode_size, &decoded_size))
+        {
+            return false;
+        }
+        d_progress += decoded_size;
+    }
+    if (s_progress < srclen)
+    {
+        *p_dstlen = 0;
+        return false;
+    }
+    *p_dstlen = d_progress;
+    return true;
+}
+#else // USE_LKCA
+bool libspdm_encode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
+{
+    return false;
+}
+
+bool libspdm_decode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
+{
+    return false;
+}
+#endif // USE_LKCA
+
 static bool lkca_asn1_get_tag(uint8_t const *ptr, uint8_t const *end,
                               size_t *length, uint32_t tag)
 {
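[Editor's note] A round-trip sketch for the helpers added above; not part of the commit, USE_LKCA builds only, and the buffer sizes are illustrative. libspdm_decode_base64() consumes line-wrapped input and skips the final byte of the last round as a newline, so one is appended after encoding:

    static bool example_base64_roundtrip(void)
    {
        const uint8_t raw[4] = { 0xde, 0xad, 0xbe, 0xef };
        uint8_t b64[17];
        uint8_t out[8];
        size_t b64_len = sizeof(b64) - 1;
        size_t out_len = sizeof(out);

        if (!libspdm_encode_base64(raw, b64, sizeof(raw), &b64_len)) {
            return false;          /* destination too small */
        }
        b64[b64_len++] = '\n';     /* decoder expects a trailing line break */

        if (!libspdm_decode_base64(b64, out, b64_len, &out_len)) {
            return false;          /* malformed input or buffer too small */
        }
        return (out_len == sizeof(raw));
    }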
@@ -26,3 +26,83 @@
 #include "os-interface.h"
 #include "nv-linux.h"

+#if IS_ENABLED(CONFIG_TEGRA_BPMP)
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/bpmp.h>
+#endif // IS_ENABLED(CONFIG_TEGRA_BPMP)
+
+/*!
+ * @brief Sends an MRQ (message-request) to BPMP
+ *
+ * The request, response, and ret parameters of this function correspond to the
+ * components of the tegra_bpmp_message struct, which BPMP uses to receive
+ * MRQs.
+ *
+ * @param[in]   nv                  Per GPU Linux state
+ * @param[in]   mrq                 MRQ_xxx ID specifying what is requested
+ * @param[in]   request_data        Pointer to request input data
+ * @param[in]   request_data_size   Size of structure pointed to by pRequestData
+ * @param[out]  response_data       Pointer to response output data
+ * @param[in]   response_data_size  Size of structure pointed to by pResponseData
+ * @param[out]  ret                 MRQ return code (from "ret" element of
+ *                                  tegra_bpmp_message struct)
+ * @param[out]  api_ret             Return code from tegra_bpmp_transfer call
+ *
+ * @returns NV_OK if successful,
+ *          NV_ERR_NOT_SUPPORTED if the functionality is not available,
+ *          NV_ERR_INVALID_POINTER if the tegra_bpmp struct pointer could not
+ *          be obtained from nv, or
+ *          NV_ERR_GENERIC if the tegra_bpmp_transfer call failed (see apiRet
+ *          for Linux error code).
+ */
+NV_STATUS NV_API_CALL
+nv_bpmp_send_mrq
+(
+    nv_state_t *nv,
+    NvU32 mrq,
+    const void *request_data,
+    NvU32 request_data_size,
+    void *response_data,
+    NvU32 response_data_size,
+    NvS32 *ret,
+    NvS32 *api_ret
+)
+{
+#if IS_ENABLED(CONFIG_TEGRA_BPMP) && NV_SUPPORTS_PLATFORM_DEVICE
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct tegra_bpmp *bpmp;
+    struct tegra_bpmp_message msg;
+
+    bpmp = tegra_bpmp_get(nvl->dev);
+    if (IS_ERR(bpmp))
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: Error getting bpmp struct: %s\n",
+                  PTR_ERR(bpmp));
+        return NV_ERR_INVALID_POINTER;
+    }
+
+    // Send the MRQ request to BPMP.
+    memset(&msg, 0, sizeof(msg));
+    msg.mrq = mrq;
+    msg.tx.data = request_data;
+    msg.tx.size = (size_t) request_data_size;
+    msg.rx.data = response_data;
+    msg.rx.size = (size_t) response_data_size;
+
+    *api_ret = (NvS32) tegra_bpmp_transfer(bpmp, &msg);
+
+    if (*api_ret == 0)
+    {
+        *ret = (NvS32) msg.rx.ret;
+        return NV_OK;
+    }
+    else
+    {
+        return NV_ERR_GENERIC;
+    }
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}

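[Editor's note] A usage sketch for nv_bpmp_send_mrq(); not part of the commit. MRQ_PING and its request/response structs are assumed to come from <soc/tegra/bpmp-abi.h>; treat the specifics as illustrative:

    static NV_STATUS example_bpmp_ping(nv_state_t *nv)
    {
        struct mrq_ping_request  req = { .challenge = 1 };
        struct mrq_ping_response rsp = { 0 };
        NvS32 mrq_ret = 0;
        NvS32 api_ret = 0;
        NV_STATUS status;

        status = nv_bpmp_send_mrq(nv, MRQ_PING,
                                  &req, sizeof(req),
                                  &rsp, sizeof(rsp),
                                  &mrq_ret, &api_ret);
        if (status != NV_OK)
        {
            return status;              /* see api_ret for the Linux error */
        }
        return (mrq_ret == 0) ? NV_OK : NV_ERR_GENERIC;
    }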
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -510,8 +510,6 @@ int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd)
     int dup_fd;
     struct inode *inode = NULL;
     dev_t rdev = 0;
-    struct files_struct *files = current->files;
-    struct fdtable *fdt;

     if (cap == NULL)
     {
@@ -543,29 +541,10 @@ int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd)
         goto err;
     }

-    dup_fd = NV_GET_UNUSED_FD_FLAGS(O_CLOEXEC);
+    dup_fd = get_unused_fd_flags(O_CLOEXEC);
     if (dup_fd < 0)
     {
-        dup_fd = NV_GET_UNUSED_FD();
-        if (dup_fd < 0)
-        {
-            goto err;
-        }
-
-        /*
-         * Set CLOEXEC before installing the FD.
-         *
-         * If fork() happens in between, the opened unused FD will have
-         * a NULL struct file associated with it, which is okay.
-         *
-         * The only well known bug here is the race with dup(2), which is
-         * already documented in the kernel, see fd_install()'s description.
-         */
-
-        spin_lock(&files->file_lock);
-        fdt = files_fdtable(files);
-        __set_bit(dup_fd, fdt->close_on_exec);
-        spin_unlock(&files->file_lock);
+        goto err;
     }

     fd_install(dup_fd, file);
@@ -582,6 +561,9 @@ err:
 void NV_API_CALL nv_cap_close_fd(int fd)
 {
+#if NV_FILESYSTEM_ACCESS_AVAILABLE
+    struct file *file;
+    NvBool is_nv_cap_fd;

     if (fd == -1)
     {
         return;
@@ -600,6 +582,30 @@ void NV_API_CALL nv_cap_close_fd(int fd)
         return;
     }

+    file = fget(fd);
+    if (file == NULL)
+    {
+        task_unlock(current);
+        return;
+    }
+
+    /* Make sure the fd belongs to the nv-cap-drv */
+    is_nv_cap_fd = (file->f_op == &g_nv_cap_drv_fops);
+
+    fput(file);
+
+    /*
+     * In some cases, we may be in shutdown path and execute
+     * in context of unrelated process. In that case we should
+     * not access any 'current' state, but instead let kernel
+     * clean up capability files on its own.
+     */
+    if (!is_nv_cap_fd)
+    {
+        task_unlock(current);
+        return;
+    }
+
     /*
      * From v4.17-rc1 (to v5.10.8) kernels have stopped exporting sys_close(fd)
      * and started exporting __close_fd, as of this commit:

@@ -27,13 +27,8 @@
 #include "nv-linux.h"
 #include "nv-platform.h"

-#if defined(NV_SOC_TEGRA_BPMP_ABI_H_PRESENT)
 #include <soc/tegra/bpmp-abi.h>
-#endif
-
-#if defined(NV_SOC_TEGRA_BPMP_H_PRESENT)
 #include <soc/tegra/bpmp.h>
-#endif

 // Use the CCF APIs if enabled in Kernel config and RM build
 // has Dual license define enabled.
@@ -59,6 +54,12 @@ static const char *osMapClk[] = {
     [TEGRASOC_WHICH_CLK_NVDISPLAY_DISP] = "nvdisplay_disp_clk",
     [TEGRASOC_WHICH_CLK_NVDISPLAY_P0] = "nvdisplay_p0_clk",
     [TEGRASOC_WHICH_CLK_NVDISPLAY_P1] = "nvdisplay_p1_clk",
+    [TEGRASOC_WHICH_CLK_NVDISPLAY_P2] = "nvdisplay_p2_clk",
+    [TEGRASOC_WHICH_CLK_NVDISPLAY_P3] = "nvdisplay_p3_clk",
+    [TEGRASOC_WHICH_CLK_NVDISPLAY_P4] = "nvdisplay_p4_clk",
+    [TEGRASOC_WHICH_CLK_NVDISPLAY_P5] = "nvdisplay_p5_clk",
+    [TEGRASOC_WHICH_CLK_NVDISPLAY_P6] = "nvdisplay_p6_clk",
+    [TEGRASOC_WHICH_CLK_NVDISPLAY_P7] = "nvdisplay_p7_clk",
     [TEGRASOC_WHICH_CLK_DPAUX0] = "dpaux0_clk",
    [TEGRASOC_WHICH_CLK_FUSE] = "fuse_clk",
     [TEGRASOC_WHICH_CLK_DSIPLL_VCO] = "dsipll_vco_clk",
@@ -77,9 +78,21 @@ static const char *osMapClk[] = {
     [TEGRASOC_WHICH_CLK_VPLL0_REF] = "vpll0_ref_clk",
     [TEGRASOC_WHICH_CLK_VPLL0] = "vpll0_clk",
     [TEGRASOC_WHICH_CLK_VPLL1] = "vpll1_clk",
+    [TEGRASOC_WHICH_CLK_VPLL2] = "vpll2_clk",
+    [TEGRASOC_WHICH_CLK_VPLL3] = "vpll3_clk",
+    [TEGRASOC_WHICH_CLK_VPLL4] = "vpll4_clk",
+    [TEGRASOC_WHICH_CLK_VPLL5] = "vpll5_clk",
+    [TEGRASOC_WHICH_CLK_VPLL6] = "vpll6_clk",
+    [TEGRASOC_WHICH_CLK_VPLL7] = "vpll7_clk",
     [TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF] = "nvdisplay_p0_ref_clk",
     [TEGRASOC_WHICH_CLK_RG0] = "rg0_clk",
     [TEGRASOC_WHICH_CLK_RG1] = "rg1_clk",
+    [TEGRASOC_WHICH_CLK_RG2] = "rg2_clk",
+    [TEGRASOC_WHICH_CLK_RG3] = "rg3_clk",
+    [TEGRASOC_WHICH_CLK_RG4] = "rg4_clk",
+    [TEGRASOC_WHICH_CLK_RG5] = "rg5_clk",
+    [TEGRASOC_WHICH_CLK_RG6] = "rg6_clk",
+    [TEGRASOC_WHICH_CLK_RG7] = "rg7_clk",
     [TEGRASOC_WHICH_CLK_DISPPLL] = "disppll_clk",
     [TEGRASOC_WHICH_CLK_DISPHUBPLL] = "disphubpll_clk",
     [TEGRASOC_WHICH_CLK_DSI_LP] = "dsi_lp_clk",
@@ -87,9 +100,20 @@ static const char *osMapClk[] = {
     [TEGRASOC_WHICH_CLK_DSI_PIXEL] = "dsi_pixel_clk",
     [TEGRASOC_WHICH_CLK_PRE_SOR0] = "pre_sor0_clk",
     [TEGRASOC_WHICH_CLK_PRE_SOR1] = "pre_sor1_clk",
+    [TEGRASOC_WHICH_CLK_PRE_SOR2] = "pre_sor2_clk",
+    [TEGRASOC_WHICH_CLK_PRE_SOR3] = "pre_sor3_clk",
     [TEGRASOC_WHICH_CLK_DP_LINKA_REF] = "dp_link_ref_clk",
+    [TEGRASOC_WHICH_CLK_DP_LINKB_REF] = "dp_linkb_ref_clk",
+    [TEGRASOC_WHICH_CLK_DP_LINKC_REF] = "dp_linkc_ref_clk",
+    [TEGRASOC_WHICH_CLK_DP_LINKD_REF] = "dp_linkd_ref_clk",
     [TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT] = "sor_linka_input_clk",
+    [TEGRASOC_WHICH_CLK_SOR_LINKB_INPUT] = "sor_linkb_input_clk",
+    [TEGRASOC_WHICH_CLK_SOR_LINKC_INPUT] = "sor_linkc_input_clk",
+    [TEGRASOC_WHICH_CLK_SOR_LINKD_INPUT] = "sor_linkd_input_clk",
     [TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO] = "sor_linka_afifo_clk",
+    [TEGRASOC_WHICH_CLK_SOR_LINKB_AFIFO] = "sor_linkb_afifo_clk",
+    [TEGRASOC_WHICH_CLK_SOR_LINKC_AFIFO] = "sor_linkc_afifo_clk",
+    [TEGRASOC_WHICH_CLK_SOR_LINKD_AFIFO] = "sor_linkd_afifo_clk",
     [TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M] = "sor_linka_afifo_m_clk",
     [TEGRASOC_WHICH_CLK_RG0_M] = "rg0_m_clk",
     [TEGRASOC_WHICH_CLK_RG1_M] = "rg1_m_clk",
@@ -98,17 +122,36 @@ static const char *osMapClk[] = {
     [TEGRASOC_WHICH_CLK_PLLHUB] = "pllhub_clk",
     [TEGRASOC_WHICH_CLK_SOR0] = "sor0_clk",
     [TEGRASOC_WHICH_CLK_SOR1] = "sor1_clk",
+    [TEGRASOC_WHICH_CLK_SOR2] = "sor2_clk",
+    [TEGRASOC_WHICH_CLK_SOR3] = "sor3_clk",
     [TEGRASOC_WHICH_CLK_SOR_PADA_INPUT] = "sor_pad_input_clk",
+    [TEGRASOC_WHICH_CLK_SOR_PADB_INPUT] = "sor_padb_input_clk",
+    [TEGRASOC_WHICH_CLK_SOR_PADC_INPUT] = "sor_padc_input_clk",
+    [TEGRASOC_WHICH_CLK_SOR_PADD_INPUT] = "sor_padd_input_clk",
     [TEGRASOC_WHICH_CLK_SOR0_PAD] = "sor0_pad_clk",
     [TEGRASOC_WHICH_CLK_SOR1_PAD] = "sor1_pad_clk",
+    [TEGRASOC_WHICH_CLK_SOR2_PAD] = "sor2_pad_clk",
+    [TEGRASOC_WHICH_CLK_SOR3_PAD] = "sor3_pad_clk",
     [TEGRASOC_WHICH_CLK_PRE_SF0] = "pre_sf0_clk",
     [TEGRASOC_WHICH_CLK_SF0] = "sf0_clk",
+    [TEGRASOC_WHICH_CLK_SF1] = "sf1_clk",
+    [TEGRASOC_WHICH_CLK_SF2] = "sf2_clk",
+    [TEGRASOC_WHICH_CLK_SF3] = "sf3_clk",
+    [TEGRASOC_WHICH_CLK_SF4] = "sf4_clk",
+    [TEGRASOC_WHICH_CLK_SF5] = "sf5_clk",
+    [TEGRASOC_WHICH_CLK_SF6] = "sf6_clk",
+    [TEGRASOC_WHICH_CLK_SF7] = "sf7_clk",
     [TEGRASOC_WHICH_CLK_DSI_PAD_INPUT] = "dsi_pad_input_clk",
     [TEGRASOC_WHICH_CLK_PRE_SOR0_REF] = "pre_sor0_ref_clk",
+    [TEGRASOC_WHICH_CLK_PRE_SOR1_REF] = "pre_sor1_ref_clk",
     [TEGRASOC_WHICH_CLK_SOR0_PLL_REF] = "sor0_ref_pll_clk",
     [TEGRASOC_WHICH_CLK_SOR1_PLL_REF] = "sor1_ref_pll_clk",
+    [TEGRASOC_WHICH_CLK_SOR2_PLL_REF] = "sor2_ref_pll_clk",
+    [TEGRASOC_WHICH_CLK_SOR3_PLL_REF] = "sor3_ref_pll_clk",
     [TEGRASOC_WHICH_CLK_SOR0_REF] = "sor0_ref_clk",
     [TEGRASOC_WHICH_CLK_SOR1_REF] = "sor1_ref_clk",
+    [TEGRASOC_WHICH_CLK_SOR2_REF] = "sor2_ref_clk",
+    [TEGRASOC_WHICH_CLK_SOR3_REF] = "sor3_ref_clk",
     [TEGRASOC_WHICH_CLK_OSC] = "osc_clk",
     [TEGRASOC_WHICH_CLK_DSC] = "dsc_clk",
     [TEGRASOC_WHICH_CLK_MAUD] = "maud_clk",
@@ -122,6 +165,18 @@ static const char *osMapClk[] = {
     [TEGRASOC_WHICH_CLK_PLLA_DISP] = "plla_disp",
     [TEGRASOC_WHICH_CLK_PLLA_DISPHUB] = "plla_disphub",
     [TEGRASOC_WHICH_CLK_PLLA] = "plla",
+    [TEGRASOC_WHICH_CLK_VPLLX_SOR0_MUXED] = "vpllx_sor0_muxed_clk",
+    [TEGRASOC_WHICH_CLK_VPLLX_SOR1_MUXED] = "vpllx_sor1_muxed_clk",
+    [TEGRASOC_WHICH_CLK_VPLLX_SOR2_MUXED] = "vpllx_sor2_muxed_clk",
+    [TEGRASOC_WHICH_CLK_VPLLX_SOR3_MUXED] = "vpllx_sor3_muxed_clk",
+    [TEGRASOC_WHICH_CLK_SF0_SOR] = "sf0_sor_clk",
+    [TEGRASOC_WHICH_CLK_SF1_SOR] = "sf1_sor_clk",
+    [TEGRASOC_WHICH_CLK_SF2_SOR] = "sf2_sor_clk",
+    [TEGRASOC_WHICH_CLK_SF3_SOR] = "sf3_sor_clk",
+    [TEGRASOC_WHICH_CLK_SF4_SOR] = "sf4_sor_clk",
+    [TEGRASOC_WHICH_CLK_SF5_SOR] = "sf5_sor_clk",
+    [TEGRASOC_WHICH_CLK_SF6_SOR] = "sf6_sor_clk",
+    [TEGRASOC_WHICH_CLK_SF7_SOR] = "sf7_sor_clk",
     [TEGRASOC_WHICH_CLK_EMC] = "emc_clk",
     [TEGRASOC_WHICH_CLK_GPU_SYS] = "sysclk",
     [TEGRASOC_WHICH_CLK_GPU_NVD] = "nvdclk",
@@ -154,7 +209,7 @@ NV_STATUS NV_API_CALL nv_clk_get_handles(
     nv_state_t *nv)
 {
     NV_STATUS status = NV_OK;
-#if defined(NV_DEVM_CLK_BULK_GET_ALL_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
+#if defined(NV_DEVM_CLK_BULK_GET_ALL_PRESENT)
     nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
     NvU32 i, j;
     int clk_count;
@@ -745,7 +800,7 @@ NV_STATUS NV_API_CALL nv_dp_uphy_pll_init
     NvU32 lanes_bitmap
 )
 {
-#if defined(NV_SOC_TEGRA_BPMP_ABI_H_PRESENT) && defined(NV_CMD_UPHY_DISPLAY_PORT_INIT_PRESENT)
+#if defined(NV_CMD_UPHY_DISPLAY_PORT_INIT_PRESENT)
     nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
     struct tegra_bpmp *bpmp;
     struct tegra_bpmp_message msg;
@@ -791,7 +846,7 @@ NV_STATUS NV_API_CALL nv_dp_uphy_pll_init

 NV_STATUS NV_API_CALL nv_dp_uphy_pll_deinit(nv_state_t *nv)
 {
-#if defined(NV_SOC_TEGRA_BPMP_ABI_H_PRESENT) && defined(NV_CMD_UPHY_DISPLAY_PORT_OFF_PRESENT)
+#if defined(NV_CMD_UPHY_DISPLAY_PORT_OFF_PRESENT)
     nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
     struct tegra_bpmp *bpmp;
     struct tegra_bpmp_message msg;

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -61,16 +61,11 @@ static NV_STATUS nv_dma_map_contig(
     NvU64 *va
 )
 {
-#if defined(NV_DMA_MAP_PAGE_ATTRS_PRESENT) && defined(NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT)
     *va = dma_map_page_attrs(dma_map->dev, dma_map->pages[0], 0,
                              dma_map->page_count * PAGE_SIZE,
                              DMA_BIDIRECTIONAL,
                              (dma_map->cache_type == NV_MEMORY_UNCACHED) ?
                              DMA_ATTR_SKIP_CPU_SYNC : 0);
-#else
-    *va = dma_map_page(dma_map->dev, dma_map->pages[0], 0,
-                       dma_map->page_count * PAGE_SIZE, DMA_BIDIRECTIONAL);
-#endif
     if (dma_mapping_error(dma_map->dev, *va))
     {
         return NV_ERR_OPERATING_SYSTEM;
@@ -95,16 +90,11 @@ static NV_STATUS nv_dma_map_contig(

 static void nv_dma_unmap_contig(nv_dma_map_t *dma_map)
 {
-#if defined(NV_DMA_MAP_PAGE_ATTRS_PRESENT) && defined(NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT)
     dma_unmap_page_attrs(dma_map->dev, dma_map->mapping.contig.dma_addr,
                          dma_map->page_count * PAGE_SIZE,
                          DMA_BIDIRECTIONAL,
                          (dma_map->cache_type == NV_MEMORY_UNCACHED) ?
                          DMA_ATTR_SKIP_CPU_SYNC : 0);
-#else
-    dma_unmap_page(dma_map->dev, dma_map->mapping.contig.dma_addr,
-                   dma_map->page_count * PAGE_SIZE, DMA_BIDIRECTIONAL);
-#endif
 }

 static void nv_fill_scatterlist
@@ -386,7 +376,7 @@ NV_STATUS NV_API_CALL nv_dma_map_sgt(
         return NV_ERR_NOT_SUPPORTED;
     }

-    if (page_count > NV_NUM_PHYSPAGES)
+    if (page_count > get_num_physpages())
     {
         NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev,
                           "DMA mapping request too large!\n");
@@ -467,7 +457,7 @@ static NV_STATUS NV_API_CALL nv_dma_map_pages(
         return NV_ERR_NOT_SUPPORTED;
     }

-    if (page_count > NV_NUM_PHYSPAGES)
+    if (page_count > get_num_physpages())
     {
         NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev,
                           "DMA mapping request too large!\n");
@@ -537,7 +527,7 @@ static NV_STATUS NV_API_CALL nv_dma_unmap_pages(

     dma_map = *priv;

-    if (page_count > NV_NUM_PHYSPAGES)
+    if (page_count > get_num_physpages())
     {
         NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev,
                           "DMA unmapping request too large!\n");
@@ -708,16 +698,13 @@ static NvBool nv_dma_use_map_resource
     nv_dma_device_t *dma_dev
 )
 {
-#if defined(NV_DMA_MAP_RESOURCE_PRESENT)
     const struct dma_map_ops *ops = get_dma_ops(dma_dev->dev);
-#endif

     if (nv_dma_remap_peer_mmio == NV_DMA_REMAP_PEER_MMIO_DISABLE)
     {
         return NV_FALSE;
     }

-#if defined(NV_DMA_MAP_RESOURCE_PRESENT)
     if (ops == NULL)
     {
         /* On pre-5.0 kernels, if dma_map_resource() is present, then we
@@ -732,9 +719,32 @@ static NvBool nv_dma_use_map_resource
     }

     return (ops->map_resource != NULL);
-#else
-    return NV_FALSE;
-#endif
 }

+/* DMA-map a peer device's C2C aperture for peer access. */
+NV_STATUS NV_API_CALL nv_dma_map_non_pci_peer
+(
+    nv_dma_device_t *dma_dev,
+    NvU64 page_count,
+    NvU64 *va
+)
+{
+    NV_STATUS status;
+
+    if (nv_dma_use_map_resource(dma_dev))
+    {
+        status = nv_dma_map_mmio(dma_dev, page_count, va);
+    }
+    else
+    {
+        /*
+         * Best effort - can't map through the iommu but at least try to
+         * use SPA as is.
+         */
+        status = NV_OK;
+    }
+
+    return status;
+}
+
 /* DMA-map a peer PCI device's BAR for peer access. */
@@ -817,7 +827,6 @@ NV_STATUS NV_API_CALL nv_dma_map_mmio
     NvU64 *va
 )
 {
-#if defined(NV_DMA_MAP_RESOURCE_PRESENT)
     BUG_ON(!va);

     if (nv_dma_use_map_resource(dma_dev))
@@ -844,9 +853,6 @@ NV_STATUS NV_API_CALL nv_dma_map_mmio
     }

     return NV_OK;
-#else
-    return NV_ERR_NOT_SUPPORTED;
-#endif
 }

 void NV_API_CALL nv_dma_unmap_mmio
@@ -856,13 +862,11 @@ void NV_API_CALL nv_dma_unmap_mmio
     NvU64 va
 )
 {
-#if defined(NV_DMA_MAP_RESOURCE_PRESENT)
     if (nv_dma_use_map_resource(dma_dev))
     {
         dma_unmap_resource(dma_dev->dev, va, page_count * PAGE_SIZE,
                            DMA_BIDIRECTIONAL, 0);
     }
-#endif
 }

 /*
@@ -905,40 +909,16 @@ void NV_API_CALL nv_dma_cache_invalidate
 #endif
 }

-#if defined(NV_LINUX_DMA_BUF_H_PRESENT) && \
-    defined(NV_DRM_AVAILABLE) && defined(NV_DRM_DRM_GEM_H_PRESENT)
-
-/*
- * drm_gem_object_{get/put}() added by commit
- * e6b62714e87c8811d5564b6a0738dcde63a51774 (2017-02-28) and
- * drm_gem_object_{reference/unreference}() removed by commit
- * 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15).
- */
+#if defined(NV_DRM_AVAILABLE)

 static inline void
-nv_dma_gem_object_unreference_unlocked(struct drm_gem_object *gem)
+nv_dma_gem_object_put_unlocked(struct drm_gem_object *gem)
 {
-#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
-
 #if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT)
     drm_gem_object_put_unlocked(gem);
 #else
     drm_gem_object_put(gem);
 #endif
-
-#else
-    drm_gem_object_unreference_unlocked(gem);
-#endif
 }

-static inline void
-nv_dma_gem_object_reference(struct drm_gem_object *gem)
-{
-#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
-    drm_gem_object_get(gem);
-#else
-    drm_gem_object_reference(gem);
-#endif
-}

 NV_STATUS NV_API_CALL nv_dma_import_sgt
@@ -967,7 +947,7 @@ NV_STATUS NV_API_CALL nv_dma_import_sgt

     // Do nothing with SGT, it is already mapped and pinned by the exporter

-    nv_dma_gem_object_reference(gem);
+    drm_gem_object_get(gem);

     return NV_OK;
 }
@@ -986,7 +966,7 @@ void NV_API_CALL nv_dma_release_sgt
     // Do nothing with SGT, it will be unmapped and unpinned by the exporter
     WARN_ON(sgt == NULL);

-    nv_dma_gem_object_unreference_unlocked(gem);
+    nv_dma_gem_object_put_unlocked(gem);

     module_put(gem->dev->driver->fops->owner);
 }
@@ -1010,4 +990,4 @@ void NV_API_CALL nv_dma_release_sgt
 )
 {
 }
-#endif /* NV_LINUX_DMA_BUF_H_PRESENT && NV_DRM_AVAILABLE && NV_DRM_DRM_GEM_H_PRESENT */
+#endif /* NV_DRM_AVAILABLE */

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -82,9 +82,6 @@ typedef struct nv_dma_buf_file_private
     // RM-private info for MIG configs
     void *mig_info;

-    // Flag to indicate if dma-buf mmap is allowed
-    NvBool can_mmap;
-
     //
     // Flag to indicate if phys addresses are static and can be
     // fetched during dma-buf create/reuse instead of in map.
|
||||
// limitations. On such systems, IOMMU map/unmap will be skipped.
|
||||
//
|
||||
NvBool skip_iommu;
|
||||
|
||||
struct
|
||||
{
|
||||
// True if the map attributes are cached
|
||||
NvBool cached;
|
||||
|
||||
// Flag to indicate if dma-buf mmap is allowed
|
||||
NvBool can_mmap;
|
||||
|
||||
//
|
||||
// Flag to indicate if client/user is allowed dma-buf mmap or not.
|
||||
// That way user can enable mmap for testing/specific
|
||||
// use cases and not for any all handles.
|
||||
//
|
||||
NvU64 allow_mmap;
|
||||
|
||||
// RM-private info for cache type settings (cached/uncached/writecombined).
|
||||
NvU32 cache_type;
|
||||
|
||||
// Flag to indicate if dma-buf is RO or RW memory.
|
||||
NvBool read_only_mem;
|
||||
|
||||
// Memory type info: see nv_memory_type_t.
|
||||
nv_memory_type_t memory_type;
|
||||
} map_attrs;
|
||||
|
||||
//
|
||||
// Flag to indicate if all GPU locks to be acquired/released before/after calling
|
||||
// rm_dma_buf_dup_mem_handle().
|
||||
// nv_dma_buf_dup_mem_handles() acquires GPU lock only for calling pGPU
|
||||
// instance. However, it is not sufficient as per DupObject() SYSMEM's design
|
||||
// since it expects either all GPU locks to be acquired by the caller or
|
||||
// do not take any GPU locks. This flag is set to TRUE only for
|
||||
// ZERO_FB chips.
|
||||
//
|
||||
NvBool acquire_release_all_gpu_lock_on_dup;
|
||||
} nv_dma_buf_file_private_t;
|
||||
|
||||
static void
|
||||
@@ -281,6 +314,49 @@ unlock_api_lock:
|
||||
rm_release_api_lock(sp);
|
||||
}
|
||||
|
||||
//
|
||||
// TODO: Temporary workaround for the SYSMEM Dup issue.
|
||||
// Take all GPU locks before calling DupObject().
|
||||
// DupObject() requires the caller either to acquire all GPU locks beforehand
|
||||
// or to refrain from acquiring any GPU locks before invoking it.
|
||||
// Otherwise, in the multi-GPU case, DupObject() fails for an already-locked
|
||||
// GPU instance with the below error print:
|
||||
// "GPU lock already acquired by this thread" for a gpuInst that is already
|
||||
// locked during nv_dma_buf_dup_mem_handles().
|
||||
// In TOT, nv_dma_buf_dup_mem_handles() acquires the GPU lock only for the calling
|
||||
// pGPU instance. That is not sufficient for DupObject()'s SYSMEM design, which
|
||||
// expects the caller either to acquire all GPU locks or to take none.
|
||||
// PDB_PROP_GPU_ZERO_FB chips (iGPU) don't have local memory. In that case,
|
||||
// SYSMEM backs device resources. The priv->acquire_release_all_gpu_lock_on_dup
|
||||
// flag is set to NV_TRUE only for PDB_PROP_GPU_ZERO_FB chips.
|
||||
//
|
||||
// Proper fix (Bug 4866388):
|
||||
// The RS_FLAGS_ACQUIRE_RELAXED_GPUS_LOCK_ON_DUP flag was introduced to allow an
|
||||
// RM class to take the GPU group lock if the source and the destination object
|
||||
// belong to the same pGpu, and to take all GPU locks otherwise.
|
||||
// With the above change, we are seeing test failures.
|
||||
// Until that proper fix lands, we rely on this temporary workaround.
|
||||
//
|
||||
static inline NV_STATUS
|
||||
nv_dma_buf_acquire_gpu_lock(
|
||||
nvidia_stack_t *sp,
|
||||
nv_dma_buf_file_private_t *priv
|
||||
)
|
||||
{
|
||||
return (priv->acquire_release_all_gpu_lock_on_dup ?
|
||||
rm_acquire_all_gpus_lock(sp): rm_acquire_gpu_lock(sp, priv->nv));
|
||||
}
|
||||
|
||||
static inline NV_STATUS
|
||||
nv_dma_buf_release_gpu_lock(
|
||||
nvidia_stack_t *sp,
|
||||
nv_dma_buf_file_private_t *priv
|
||||
)
|
||||
{
|
||||
return (priv->acquire_release_all_gpu_lock_on_dup ?
|
||||
rm_release_all_gpus_lock(sp): rm_release_gpu_lock(sp, priv->nv));
|
||||
}
|
||||
|
||||
static NV_STATUS
|
||||
nv_dma_buf_dup_mem_handles(
|
||||
nvidia_stack_t *sp,
|
||||
@@ -299,7 +375,7 @@ nv_dma_buf_dup_mem_handles(
|
||||
return status;
|
||||
}
|
||||
|
||||
status = rm_acquire_gpu_lock(sp, priv->nv);
|
||||
status = nv_dma_buf_acquire_gpu_lock(sp, priv);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto unlock_api_lock;
|
||||
@@ -309,6 +385,10 @@ nv_dma_buf_dup_mem_handles(
|
||||
{
|
||||
NvHandle h_memory_duped = 0;
|
||||
void *mem_info = NULL;
|
||||
nv_memory_type_t memory_type = NV_MEMORY_TYPE_SYSTEM;
|
||||
NvBool can_mmap;
|
||||
NvU32 cache_type;
|
||||
NvBool read_only_mem;
|
||||
|
||||
if (priv->handles[index].h_memory != 0)
|
||||
{
|
||||
@@ -332,12 +412,38 @@ nv_dma_buf_dup_mem_handles(
|
||||
params->offsets[i],
|
||||
params->sizes[i],
|
||||
&h_memory_duped,
|
||||
&mem_info);
|
||||
&mem_info,
|
||||
&can_mmap,
|
||||
&cache_type,
|
||||
&read_only_mem,
|
||||
&memory_type);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
|
||||
if (priv->map_attrs.cached)
|
||||
{
|
||||
if ((can_mmap != priv->map_attrs.can_mmap) ||
|
||||
(cache_type != priv->map_attrs.cache_type) ||
|
||||
(read_only_mem != priv->map_attrs.read_only_mem) ||
|
||||
(memory_type != priv->map_attrs.memory_type))
|
||||
{
|
||||
// Creating mixed dma_buf is not supported.
|
||||
status = NV_ERR_INVALID_ARGUMENT;
|
||||
goto failed;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// Store the handle's mmap, RO and cache type info.
|
||||
priv->map_attrs.can_mmap = can_mmap;
|
||||
priv->map_attrs.cache_type = cache_type;
|
||||
priv->map_attrs.read_only_mem = read_only_mem;
|
||||
priv->map_attrs.memory_type = memory_type;
|
||||
priv->map_attrs.cached = NV_TRUE;
|
||||
}
|
||||
|
||||
priv->attached_size += params->sizes[i];
|
||||
priv->handles[index].h_memory = h_memory_duped;
|
||||
priv->handles[index].offset = params->offsets[i];
|
||||
@@ -355,7 +461,7 @@ nv_dma_buf_dup_mem_handles(
|
||||
goto failed;
|
||||
}
|
||||
|
||||
rm_release_gpu_lock(sp, priv->nv);
|
||||
nv_dma_buf_release_gpu_lock(sp, priv);
|
||||
|
||||
rm_release_api_lock(sp);
|
||||
|
||||
@@ -364,7 +470,7 @@ nv_dma_buf_dup_mem_handles(
|
||||
failed:
|
||||
nv_dma_buf_undup_mem_handles_unlocked(sp, params->index, count, priv);
|
||||
|
||||
rm_release_gpu_lock(sp, priv->nv);
|
||||
nv_dma_buf_release_gpu_lock(sp, priv);
|
||||
|
||||
unlock_api_lock:
|
||||
rm_release_api_lock(sp);
|
||||
@@ -582,7 +688,7 @@ nv_dma_buf_unmap_pages(
|
||||
return;
|
||||
}
|
||||
|
||||
dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
|
||||
dma_unmap_sg_attrs(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -707,7 +813,7 @@ nv_dma_buf_map_pages (
|
||||
WARN_ON(sg != NULL);
|
||||
|
||||
// DMA map the sg_table
|
||||
rc = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
|
||||
rc = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
|
||||
if (rc <= 0)
|
||||
{
|
||||
goto free_table;
|
||||
@@ -766,12 +872,13 @@ nv_dma_buf_map_pfns (
|
||||
|
||||
for (index = 0; index < range_count; index++)
|
||||
{
|
||||
NvU64 dma_addr = priv->handles[i].memArea.pRanges[index].start;
|
||||
NvU64 phys_addr = priv->handles[i].memArea.pRanges[index].start;
|
||||
NvU64 dma_len = priv->handles[i].memArea.pRanges[index].size;
|
||||
|
||||
// Break the scatterlist into dma_max_seg_size chunks
|
||||
while(dma_len != 0)
|
||||
{
|
||||
NvU64 dma_addr = phys_addr;
|
||||
NvU32 sg_len = NV_MIN(dma_len, dma_max_seg_size);
|
||||
|
||||
if (sg == NULL)
|
||||
@@ -781,8 +888,17 @@ nv_dma_buf_map_pfns (
|
||||
|
||||
if (!priv->skip_iommu)
|
||||
{
|
||||
status = nv_dma_map_peer(&peer_dma_dev, priv->nv->dma_dev, 0x1,
|
||||
(sg_len >> PAGE_SHIFT), &dma_addr);
|
||||
if (priv->nv->coherent)
|
||||
{
|
||||
status = nv_dma_map_non_pci_peer(&peer_dma_dev,
|
||||
(sg_len >> PAGE_SHIFT),
|
||||
&dma_addr);
|
||||
}
|
||||
else
|
||||
{
|
||||
status = nv_dma_map_peer(&peer_dma_dev, priv->nv->dma_dev, 0x1,
|
||||
(sg_len >> PAGE_SHIFT), &dma_addr);
|
||||
}
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto unmap_pfns;
|
||||
@@ -792,7 +908,7 @@ nv_dma_buf_map_pfns (
|
||||
sg_set_page(sg, NULL, sg_len, 0);
|
||||
sg_dma_address(sg) = (dma_addr_t) dma_addr;
|
||||
sg_dma_len(sg) = sg_len;
|
||||
dma_addr += sg_len;
|
||||
phys_addr += sg_len;
|
||||
dma_len -= sg_len;
|
||||
mapped_nents++;
|
||||
sg = sg_next(sg);
|
||||
@@ -821,6 +937,70 @@ free_sgt:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int
|
||||
nv_dma_buf_attach(
|
||||
struct dma_buf *buf,
|
||||
#if defined(NV_DMA_BUF_OPS_ATTACH_ARG2_DEV)
|
||||
struct device *dev,
|
||||
#endif
|
||||
struct dma_buf_attachment *attachment
|
||||
)
|
||||
{
|
||||
int rc = 0;
|
||||
nv_dma_buf_file_private_t *priv = buf->priv;
|
||||
|
||||
mutex_lock(&priv->lock);
|
||||
|
||||
if (priv->mapping_type == NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE)
|
||||
{
|
||||
if(!nv_pci_is_valid_topology_for_direct_pci(priv->nv,
|
||||
to_pci_dev(attachment->dev)))
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: dma-buf attach failed: "
|
||||
"topology not supported for mapping type FORCE_PCIE\n");
|
||||
rc = -ENOTSUPP;
|
||||
goto unlock_priv;
|
||||
}
|
||||
|
||||
priv->skip_iommu = NV_TRUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
nv_dma_device_t peer_dma_dev = {{ 0 }};
|
||||
|
||||
peer_dma_dev.dev = &to_pci_dev(attachment->dev)->dev;
|
||||
peer_dma_dev.addressable_range.limit = to_pci_dev(attachment->dev)->dma_mask;
|
||||
|
||||
if (!nv_grdma_pci_topology_supported(priv->nv, &peer_dma_dev))
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: dma-buf attach failed: "
|
||||
"PCI topology not supported for dma-buf\n");
|
||||
rc = -ENOTSUPP;
|
||||
goto unlock_priv;
|
||||
}
|
||||
}
|
||||
|
||||
#if defined(NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER)
|
||||
if ((attachment->importer_ops != NULL) &&
|
||||
(!attachment->peer2peer) &&
|
||||
(!priv->nv->mem_has_struct_page))
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: dma-buf attach failed: "
|
||||
"importer unable to handle MMIO without struct page\n");
|
||||
rc = -ENOTSUPP;
|
||||
goto unlock_priv;
|
||||
}
|
||||
#endif
|
||||
|
||||
unlock_priv:
|
||||
mutex_unlock(&priv->lock);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
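
For context on the peer2peer checks above: whether an importer can accept peer MMIO that is not backed by struct page is declared on the importer side of the kernel dma-buf API. A minimal hedged sketch of a cooperating dynamic importer is below; my_move_notify and my_attach are hypothetical names, and the mapping step plus most error handling are elided.

#include <linux/dma-buf.h>

/* Hypothetical importer callback: invalidate any cached mapping when the
 * exporter moves the buffer. Dynamic importers must provide this. */
static void my_move_notify(struct dma_buf_attachment *attach)
{
    /* Tear down the cached sg_table for this attachment here. */
}

static const struct dma_buf_attach_ops my_attach_ops = {
    /* Declares that this importer can handle peer MMIO resources not
     * backed by struct page, so the exporter's peer2peer check in
     * nv_dma_buf_attach() passes. */
    .allow_peer2peer = true,
    .move_notify     = my_move_notify,
};

static struct dma_buf_attachment *my_attach(struct dma_buf *buf,
                                            struct device *dev)
{
    return dma_buf_dynamic_attach(buf, dev, &my_attach_ops, NULL);
}
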
static struct sg_table*
|
||||
nv_dma_buf_map(
|
||||
struct dma_buf_attachment *attachment,
|
||||
@@ -832,36 +1012,8 @@ nv_dma_buf_map(
|
||||
struct dma_buf *buf = attachment->dmabuf;
|
||||
nv_dma_buf_file_private_t *priv = buf->priv;
|
||||
|
||||
//
|
||||
// On non-coherent platforms, and on coherent platforms requesting
|
||||
// PCIe mapping, importers must be able to handle peer MMIO resources
|
||||
// not backed by struct page.
|
||||
//
|
||||
#if defined(NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER)
|
||||
if (((!priv->nv->coherent) ||
|
||||
(priv->mapping_type == NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE)) &&
|
||||
(attachment->importer_ops != NULL) &&
|
||||
!attachment->peer2peer)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: failed to map dynamic attachment with no P2P support\n");
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
mutex_lock(&priv->lock);
|
||||
|
||||
if (priv->mapping_type == NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE)
|
||||
{
|
||||
if(!nv_pci_is_valid_topology_for_direct_pci(priv->nv, attachment->dev))
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: topology not supported for mapping type FORCE_PCIE\n");
|
||||
return NULL;
|
||||
}
|
||||
priv->skip_iommu = NV_TRUE;
|
||||
}
|
||||
|
||||
if (priv->num_objects != priv->total_objects)
|
||||
{
|
||||
goto unlock_priv;
|
||||
@@ -880,7 +1032,7 @@ nv_dma_buf_map(
|
||||
// For MAPPING_TYPE_FORCE_PCIE on coherent platforms,
|
||||
// get the BAR1 PFN scatterlist instead of C2C pages.
|
||||
//
|
||||
if ((priv->nv->coherent) &&
|
||||
if (priv->nv->mem_has_struct_page &&
|
||||
(priv->mapping_type == NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT))
|
||||
{
|
||||
sgt = nv_dma_buf_map_pages(attachment->dev, priv);
|
||||
@@ -922,7 +1074,7 @@ nv_dma_buf_unmap(
|
||||
|
||||
mutex_lock(&priv->lock);
|
||||
|
||||
if ((priv->nv->coherent) &&
|
||||
if (priv->nv->mem_has_struct_page &&
|
||||
(priv->mapping_type == NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT))
|
||||
{
|
||||
nv_dma_buf_unmap_pages(attachment->dev, sgt, priv);
|
||||
@@ -1007,15 +1159,186 @@ nv_dma_buf_mmap(
|
||||
struct vm_area_struct *vma
|
||||
)
|
||||
{
|
||||
// TODO: Check can_mmap flag
|
||||
int ret = 0;
|
||||
NvU32 i = 0;
|
||||
nv_dma_buf_file_private_t *priv = buf->priv;
|
||||
unsigned long addr = vma->vm_start;
|
||||
NvU32 total_skip_size = 0;
|
||||
NvU64 total_map_len = NV_VMA_SIZE(vma);
|
||||
NvU64 off_in_range_array = 0;
|
||||
NvU32 index;
|
||||
|
||||
return -ENOTSUPP;
|
||||
if (priv == NULL)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: nv_dma_buf_mmap: priv == NULL.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mutex_lock(&priv->lock);
|
||||
|
||||
if (!priv->map_attrs.can_mmap)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: nv_dma_buf_mmap: mmap is not allowed can_mmap[%d] \n",
|
||||
priv->map_attrs.can_mmap);
|
||||
ret = -ENOTSUPP;
|
||||
goto unlock_priv;
|
||||
}
|
||||
|
||||
// Check for offset overflow.
|
||||
if ((NV_VMA_OFFSET(vma) + NV_VMA_SIZE(vma)) < NV_VMA_OFFSET(vma))
|
||||
{
|
||||
ret = -EOVERFLOW;
|
||||
goto unlock_priv;
|
||||
}
|
||||
|
||||
if ((NV_VMA_OFFSET(vma) + NV_VMA_SIZE(vma)) > priv->total_size)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: nv_dma_buf_mmap: Vaddr_start[%llx] Vaddr_end[%llx] "
|
||||
"vm_pgoff[%llx] page_offset[%llx] "
|
||||
"page_prot[%x] total_size[%llx] \n",
|
||||
vma->vm_start, vma->vm_end, NV_VMA_PGOFF(vma),
|
||||
NV_VMA_OFFSET(vma), pgprot_val(vma->vm_page_prot),
|
||||
priv->total_size);
|
||||
ret = -EINVAL;
|
||||
goto unlock_priv;
|
||||
}
|
||||
|
||||
nv_printf(NV_DBG_INFO,
|
||||
"NVRM: nv_dma_buf_mmap: Vaddr_start[%llx] Vaddr_end[%llx] "
|
||||
"os_page_size[%llx] vm_pgoff[%llx] page_offset[%llx] "
|
||||
"page_prot[%x] total_size[%llx] total_map_len[%llx] \n",
|
||||
vma->vm_start, vma->vm_end, PAGE_SIZE, NV_VMA_PGOFF(vma),
|
||||
NV_VMA_OFFSET(vma), pgprot_val(vma->vm_page_prot), priv->total_size,
|
||||
total_map_len);
|
||||
|
||||
// Find the first range from which map should start.
|
||||
for (i = 0; i < priv->num_objects; i++)
|
||||
{
|
||||
NvU32 range_count = priv->handles[i].memArea.numRanges;
|
||||
|
||||
for (index = 0; index < range_count; index++)
|
||||
{
|
||||
NvU64 len = priv->handles[i].memArea.pRanges[index].size;
|
||||
|
||||
total_skip_size += len;
|
||||
//
|
||||
// Skip memArea.pRanges[index] entries until we find the range that
|
||||
// contains the first page of the mapping; i.e., skip pages that lie
|
||||
// outside the offset/map length.
|
||||
//
|
||||
if (NV_VMA_OFFSET(vma) >= total_skip_size)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
total_skip_size -= len;
|
||||
|
||||
//
|
||||
// The first mapping page can start anywhere within the specific
|
||||
// memArea.pRanges[index], so adjust off_in_range_array accordingly.
|
||||
//
|
||||
off_in_range_array = (NV_VMA_OFFSET(vma) - total_skip_size);
|
||||
total_skip_size += off_in_range_array;
|
||||
goto found_start_page;
|
||||
}
|
||||
}
|
||||
|
||||
// Could not find first map page.
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: [nv_dma_buf_mmap-failed] Could not find first map page \n");
|
||||
ret = -EINVAL;
|
||||
goto unlock_priv;
|
||||
|
||||
found_start_page:
|
||||
|
||||
// RO and cache type settings
|
||||
if (nv_encode_caching(&vma->vm_page_prot,
|
||||
priv->map_attrs.cache_type,
|
||||
priv->map_attrs.memory_type))
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: [nv_dma_buf_mmap-failed] i[%u] cache_type[%llx] memory_type[%d] page_prot[%x] \n",
|
||||
i, priv->map_attrs.cache_type, priv->map_attrs.memory_type, pgprot_val(vma->vm_page_prot));
|
||||
ret = -ENXIO;
|
||||
goto unlock_priv;
|
||||
}
|
||||
|
||||
if (priv->map_attrs.read_only_mem)
|
||||
{
|
||||
vma->vm_page_prot = NV_PGPROT_READ_ONLY(vma->vm_page_prot);
|
||||
nv_vm_flags_clear(vma, VM_WRITE);
|
||||
nv_vm_flags_clear(vma, VM_MAYWRITE);
|
||||
}
|
||||
|
||||
nv_vm_flags_set(vma, VM_SHARED | VM_DONTEXPAND | VM_DONTDUMP);
|
||||
|
||||
// Create user mapping
|
||||
for (; (i < priv->num_objects) && (addr < vma->vm_end); i++)
|
||||
{
|
||||
NvU32 range_count = priv->handles[i].memArea.numRanges;
|
||||
|
||||
for (; (index < range_count && (addr < vma->vm_end)); index++)
|
||||
{
|
||||
NvU64 len = priv->handles[i].memArea.pRanges[index].size;
|
||||
NvU64 map_len = 0;
|
||||
NvU64 phy_addr;
|
||||
|
||||
phy_addr = (priv->handles[i].memArea.pRanges[index].start + off_in_range_array);
|
||||
len -= off_in_range_array;
|
||||
|
||||
// Reset to 0, after its initial use.
|
||||
off_in_range_array = 0;
|
||||
|
||||
map_len = NV_MIN(len, total_map_len);
|
||||
|
||||
//
|
||||
// nv_remap_page_range() maps a contiguous physical address space
|
||||
// into user virtual space.
|
||||
// Use the PFN-based mapping API so that reserved carveout memory
|
||||
// (OS-invisible, not managed by the OS) can be mapped too;
|
||||
// nv_remap_page_range() works for all kinds of memory regions.
|
||||
// The downsides of using nv_remap_page_range() are:
|
||||
// 1. We can't use the vm_insert_pages() batching API, so there is
|
||||
//    perf overhead from mapping every page individually.
|
||||
// 2. We can't support calling pin_user_pages() on the dma-buf's CPU VA.
|
||||
// We will revisit this code path in the future if needed.
|
||||
//
|
||||
ret = nv_remap_page_range(vma, addr, phy_addr, map_len,
|
||||
vma->vm_page_prot);
|
||||
if (ret)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: nv_dma_buf_mmap: remap_pfn_range - failed\n", ret);
|
||||
// Partial mapping is going to be freed by kernel if nv_dma_buf_mmap() fails.
|
||||
goto unlock_priv;
|
||||
}
|
||||
|
||||
nv_printf(NV_DBG_INFO,
|
||||
"NVRM: nv_dma_buf_mmap: index[%u] range_count[%u] Vaddr[%llx] "
|
||||
"page_prot[%x] phyAddr[%llx] mapLen[%llx] len[%llx] "
|
||||
"total_map_len[%llx] \n",
|
||||
index, range_count, addr, pgprot_val(vma->vm_page_prot), phy_addr,
|
||||
map_len, len, total_map_len);
|
||||
|
||||
total_map_len -= map_len;
|
||||
addr += map_len;
|
||||
}
|
||||
// Restart from the first range of the next object.
|
||||
index = 0;
|
||||
}
|
||||
|
||||
mutex_unlock(&priv->lock);
|
||||
|
||||
return 0;
|
||||
|
||||
unlock_priv:
|
||||
mutex_unlock(&priv->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
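
From userspace, the mmap support added above is exercised by mapping the exported dma-buf fd directly. A minimal sketch, assuming a dma-buf fd already obtained through the RM export path; dmabuf_fd and size are placeholders, and the fd-acquisition step is elided:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Map an exported dma-buf into the process and touch it. */
static int map_nv_dmabuf(int dmabuf_fd, size_t size)
{
    /* PROT_WRITE only succeeds when the exporter did not mark the
     * memory read-only (read_only_mem clears VM_WRITE/VM_MAYWRITE). */
    void *va = mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, dmabuf_fd, 0);
    if (va == MAP_FAILED)
    {
        /* Expected when can_mmap/allow_mmap was not set at export time. */
        perror("mmap(dma-buf)");
        return -1;
    }

    memset(va, 0, size);
    munmap(va, size);
    return 0;
}
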
#if defined(NV_DMA_BUF_OPS_HAS_KMAP) || \
|
||||
defined(NV_DMA_BUF_OPS_HAS_MAP)
|
||||
#if defined(NV_DMA_BUF_OPS_HAS_MAP)
|
||||
static void*
|
||||
nv_dma_buf_kmap_stub(
|
||||
nv_dma_buf_map_stub(
|
||||
struct dma_buf *buf,
|
||||
unsigned long page_num
|
||||
)
|
||||
@@ -1024,7 +1347,7 @@ nv_dma_buf_kmap_stub(
|
||||
}
|
||||
|
||||
static void
|
||||
nv_dma_buf_kunmap_stub(
|
||||
nv_dma_buf_unmap_stub(
|
||||
struct dma_buf *buf,
|
||||
unsigned long page_num,
|
||||
void *addr
|
||||
@@ -1034,10 +1357,9 @@ nv_dma_buf_kunmap_stub(
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC) || \
|
||||
defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
|
||||
#if defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
|
||||
static void*
|
||||
nv_dma_buf_kmap_atomic_stub(
|
||||
nv_dma_buf_map_atomic_stub(
|
||||
struct dma_buf *buf,
|
||||
unsigned long page_num
|
||||
)
|
||||
@@ -1046,7 +1368,7 @@ nv_dma_buf_kmap_atomic_stub(
|
||||
}
|
||||
|
||||
static void
|
||||
nv_dma_buf_kunmap_atomic_stub(
|
||||
nv_dma_buf_unmap_atomic_stub(
|
||||
struct dma_buf *buf,
|
||||
unsigned long page_num,
|
||||
void *addr
|
||||
@@ -1062,29 +1384,19 @@ nv_dma_buf_kunmap_atomic_stub(
|
||||
// The actual implementations of these interfaces is not really required
|
||||
// for the export operation to work.
|
||||
//
|
||||
// Same functions are used for kmap*/map* because of this commit:
|
||||
// f9b67f0014cb: dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic
|
||||
//
|
||||
static const struct dma_buf_ops nv_dma_buf_ops = {
|
||||
.attach = nv_dma_buf_attach,
|
||||
.map_dma_buf = nv_dma_buf_map,
|
||||
.unmap_dma_buf = nv_dma_buf_unmap,
|
||||
.release = nv_dma_buf_release,
|
||||
.mmap = nv_dma_buf_mmap,
|
||||
#if defined(NV_DMA_BUF_OPS_HAS_KMAP)
|
||||
.kmap = nv_dma_buf_kmap_stub,
|
||||
.kunmap = nv_dma_buf_kunmap_stub,
|
||||
#endif
|
||||
#if defined(NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC)
|
||||
.kmap_atomic = nv_dma_buf_kmap_atomic_stub,
|
||||
.kunmap_atomic = nv_dma_buf_kunmap_atomic_stub,
|
||||
#endif
|
||||
#if defined(NV_DMA_BUF_OPS_HAS_MAP)
|
||||
.map = nv_dma_buf_kmap_stub,
|
||||
.unmap = nv_dma_buf_kunmap_stub,
|
||||
.map = nv_dma_buf_map_stub,
|
||||
.unmap = nv_dma_buf_unmap_stub,
|
||||
#endif
|
||||
#if defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
|
||||
.map_atomic = nv_dma_buf_kmap_atomic_stub,
|
||||
.unmap_atomic = nv_dma_buf_kunmap_atomic_stub,
|
||||
.map_atomic = nv_dma_buf_map_atomic_stub,
|
||||
.unmap_atomic = nv_dma_buf_unmap_atomic_stub,
|
||||
#endif
|
||||
};
|
||||
|
||||
@@ -1118,12 +1430,12 @@ nv_dma_buf_create(
|
||||
return NV_ERR_NO_MEMORY;
|
||||
}
|
||||
|
||||
priv->total_objects = params->totalObjects;
|
||||
priv->total_size = params->totalSize;
|
||||
priv->nv = nv;
|
||||
priv->can_mmap = NV_FALSE;
|
||||
priv->mapping_type = params->mappingType;
|
||||
priv->skip_iommu = NV_FALSE;
|
||||
priv->total_objects = params->totalObjects;
|
||||
priv->total_size = params->totalSize;
|
||||
priv->nv = nv;
|
||||
priv->mapping_type = params->mappingType;
|
||||
priv->skip_iommu = NV_FALSE;
|
||||
priv->map_attrs.allow_mmap = params->bAllowMmap;
|
||||
|
||||
rc = nv_kmem_cache_alloc_stack(&sp);
|
||||
if (rc != 0)
|
||||
@@ -1147,7 +1459,8 @@ nv_dma_buf_create(
|
||||
&priv->h_device,
|
||||
&priv->h_subdevice,
|
||||
&priv->mig_info,
|
||||
&priv->static_phys_addrs);
|
||||
&priv->static_phys_addrs,
|
||||
&priv->acquire_release_all_gpu_lock_on_dup);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto cleanup_device;
|
||||
@@ -1159,6 +1472,20 @@ nv_dma_buf_create(
|
||||
goto cleanup_client_and_device;
|
||||
}
|
||||
|
||||
if (priv->map_attrs.allow_mmap &&
|
||||
!priv->map_attrs.can_mmap)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: mmap is not allowed for the specific handles\n");
|
||||
status = NV_ERR_NOT_SUPPORTED;
|
||||
goto cleanup_handles;
|
||||
}
|
||||
|
||||
// The user can enable mmap for testing/specific use cases rather than for all handles.
|
||||
if (!priv->map_attrs.allow_mmap)
|
||||
{
|
||||
priv->map_attrs.can_mmap = NV_FALSE;
|
||||
}
|
||||
|
||||
// Get CPU static phys addresses if possible to do so at this time.
|
||||
if (priv->static_phys_addrs)
|
||||
{
|
||||
@@ -1170,24 +1497,17 @@ nv_dma_buf_create(
|
||||
}
|
||||
}
|
||||
|
||||
#if (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 1)
|
||||
{
|
||||
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
|
||||
|
||||
exp_info.ops = &nv_dma_buf_ops;
|
||||
exp_info.size = params->totalSize;
|
||||
exp_info.flags = O_RDWR | O_CLOEXEC;
|
||||
exp_info.priv = priv;
|
||||
exp_info.ops = &nv_dma_buf_ops;
|
||||
exp_info.size = params->totalSize;
|
||||
exp_info.flags = O_RDWR | O_CLOEXEC;
|
||||
exp_info.priv = priv;
|
||||
exp_info.exp_name = "nv_dmabuf";
|
||||
|
||||
buf = dma_buf_export(&exp_info);
|
||||
}
|
||||
#elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 4)
|
||||
buf = dma_buf_export(priv, &nv_dma_buf_ops,
|
||||
params->totalSize, O_RDWR | O_CLOEXEC);
|
||||
#elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 5)
|
||||
buf = dma_buf_export(priv, &nv_dma_buf_ops,
|
||||
params->totalSize, O_RDWR | O_CLOEXEC, NULL);
|
||||
#endif
|
||||
|
||||
if (IS_ERR(buf))
|
||||
{
|
||||
@@ -1286,7 +1606,8 @@ nv_dma_buf_reuse(
|
||||
|
||||
if ((priv->total_objects < params->numObjects) ||
|
||||
(params->index > (priv->total_objects - params->numObjects)) ||
|
||||
(params->mappingType != priv->mapping_type))
|
||||
(params->mappingType != priv->mapping_type) ||
|
||||
(params->bAllowMmap != priv->map_attrs.allow_mmap))
|
||||
{
|
||||
status = NV_ERR_INVALID_ARGUMENT;
|
||||
goto unlock_priv;
|
||||
@@ -1393,6 +1714,7 @@ NV_STATUS NV_API_CALL nv_dma_import_dma_buf
|
||||
(
|
||||
nv_dma_device_t *dma_dev,
|
||||
struct dma_buf *dma_buf,
|
||||
NvBool is_ro_device_map,
|
||||
NvU32 *size,
|
||||
struct sg_table **sgt,
|
||||
nv_dma_buf_t **import_priv
|
||||
@@ -1432,7 +1754,19 @@ NV_STATUS NV_API_CALL nv_dma_import_dma_buf
|
||||
goto dma_buf_attach_fail;
|
||||
}
|
||||
|
||||
map_sgt = dma_buf_map_attachment(dma_attach, DMA_BIDIRECTIONAL);
|
||||
if (is_ro_device_map)
|
||||
{
|
||||
// Try an RO-only DMA mapping.
|
||||
nv_dma_buf->direction = DMA_TO_DEVICE;
|
||||
nv_printf(NV_DBG_INFO,
|
||||
"NVRM: nv_dma_import_dma_buf - try RO [DMA_TO_DEVICE] only mapping\n");
|
||||
}
|
||||
else
|
||||
{
|
||||
nv_dma_buf->direction = DMA_BIDIRECTIONAL;
|
||||
}
|
||||
|
||||
map_sgt = dma_buf_map_attachment(dma_attach, nv_dma_buf->direction);
|
||||
if (IS_ERR_OR_NULL(map_sgt))
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "Can't map dma attachment!\n");
|
||||
@@ -1467,6 +1801,7 @@ NV_STATUS NV_API_CALL nv_dma_import_from_fd
|
||||
(
|
||||
nv_dma_device_t *dma_dev,
|
||||
NvS32 fd,
|
||||
NvBool is_ro_device_map,
|
||||
NvU32 *size,
|
||||
struct sg_table **sgt,
|
||||
nv_dma_buf_t **import_priv
|
||||
@@ -1483,7 +1818,8 @@ NV_STATUS NV_API_CALL nv_dma_import_from_fd
|
||||
}
|
||||
|
||||
status = nv_dma_import_dma_buf(dma_dev,
|
||||
dma_buf, size, sgt, import_priv);
|
||||
dma_buf, is_ro_device_map, size,
|
||||
sgt, import_priv);
|
||||
dma_buf_put(dma_buf);
|
||||
|
||||
return status;
|
||||
@@ -1507,7 +1843,7 @@ void NV_API_CALL nv_dma_release_dma_buf
|
||||
|
||||
nv_dma_buf = (nv_dma_buf_t *)import_priv;
|
||||
dma_buf_unmap_attachment(nv_dma_buf->dma_attach, nv_dma_buf->sgt,
|
||||
DMA_BIDIRECTIONAL);
|
||||
nv_dma_buf->direction);
|
||||
dma_buf_detach(nv_dma_buf->dma_buf, nv_dma_buf->dma_attach);
|
||||
dma_buf_put(nv_dma_buf->dma_buf);
|
||||
|
||||
|
||||
@@ -37,6 +37,7 @@ static u32 *dsi_read_prop_array
|
||||
u32 *array_size
|
||||
)
|
||||
{
|
||||
#if NV_SUPPORTS_PLATFORM_DEVICE
|
||||
u32 *val_array = NULL;
|
||||
u32 count = 0;
|
||||
int ret = 0;
|
||||
@@ -44,12 +45,7 @@ static u32 *dsi_read_prop_array
|
||||
if (!prop)
|
||||
return NULL;
|
||||
|
||||
#if defined(NV_OF_PROPERTY_COUNT_ELEMS_OF_SIZE_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
count = of_property_count_elems_of_size(np, prop->name, sizeof(u32));
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array, of_property_count_elems_of_size not present\n");
|
||||
return ERR_PTR(-ENOSYS);
|
||||
#endif
|
||||
|
||||
if (count > 0)
|
||||
{
|
||||
@@ -66,13 +62,8 @@ static u32 *dsi_read_prop_array
|
||||
return ERR_PTR(-ENOSYS);
|
||||
}
|
||||
|
||||
#if defined(NV_OF_PROPERTY_READ_VARIABLE_U32_ARRAY_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
ret = of_property_read_variable_u32_array(np, prop->name,
|
||||
val_array, 0, count);
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array, of_property_read_variable_u32_array not present\n");
|
||||
ret = -ENOSYS;
|
||||
#endif
|
||||
if (ret < 0)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array, failed to read property %s\n", prop->name);
|
||||
@@ -84,6 +75,10 @@ static u32 *dsi_read_prop_array
|
||||
*array_size = count;
|
||||
|
||||
return val_array;
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array, platform device not supported\n");
|
||||
return ERR_PTR(-ENOSYS);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int dsi_get_panel_timings(struct device_node *np_panel, DSI_PANEL_INFO *panelInfo)
|
||||
@@ -174,7 +169,7 @@ static int dsi_get_panel_gpio(struct device_node *node, DSI_PANEL_INFO *panel)
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
#if defined(NV_OF_GET_NAME_GPIO_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
#if NV_SUPPORTS_PLATFORM_DEVICE
|
||||
panel->panel_gpio[DSI_GPIO_LCD_RESET] =
|
||||
of_get_named_gpio(node, "nvidia,panel-rst-gpio", 0);
|
||||
|
||||
@@ -855,7 +850,7 @@ nv_dsi_panel_enable
|
||||
void *dsiPanelInfo
|
||||
)
|
||||
{
|
||||
#if defined(NV_GPIO_DIRECTION_OUTPUT_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
#if NV_SUPPORTS_PLATFORM_DEVICE
|
||||
int ret = NV_OK;
|
||||
DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
|
||||
|
||||
@@ -897,7 +892,7 @@ nv_dsi_panel_reset
|
||||
void *dsiPanelInfo
|
||||
)
|
||||
{
|
||||
#if defined(NV_GPIO_DIRECTION_OUTPUT_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
#if NV_SUPPORTS_PLATFORM_DEVICE
|
||||
int ret = NV_OK;
|
||||
int en_panel_rst = -1;
|
||||
DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
|
||||
@@ -945,7 +940,7 @@ void nv_dsi_panel_disable
|
||||
void *dsiPanelInfo
|
||||
)
|
||||
{
|
||||
#if defined(NV_GPIO_DIRECTION_OUTPUT_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
#if NV_SUPPORTS_PLATFORM_DEVICE
|
||||
DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
|
||||
|
||||
if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE])) {
|
||||
|
||||
@@ -30,15 +30,6 @@
|
||||
|
||||
#define NV_GPIOF_DIR_IN (1 << 0)
|
||||
|
||||
/*!
|
||||
* @brief Mapping array of OS GPIO function ID to OS function name,
|
||||
* this name is used to get GPIO number from Device Tree.
|
||||
*/
|
||||
static const char *osMapGpioFunc[] = {
|
||||
[NV_OS_GPIO_FUNC_HOTPLUG_A] = "os_gpio_hotplug_a",
|
||||
[NV_OS_GPIO_FUNC_HOTPLUG_B] = "os_gpio_hotplug_b",
|
||||
};
|
||||
|
||||
NV_STATUS NV_API_CALL nv_gpio_get_pin_state
|
||||
(
|
||||
nv_state_t *nv,
|
||||
@@ -46,17 +37,13 @@ NV_STATUS NV_API_CALL nv_gpio_get_pin_state
|
||||
NvU32 *pinValue
|
||||
)
|
||||
{
|
||||
#if NV_SUPPORTS_PLATFORM_DEVICE
|
||||
int ret;
|
||||
|
||||
#if defined(NV_GPIO_GET_VALUE_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
ret = gpio_get_value(pinNum);
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "gpio_get_value not present\n");
|
||||
return NV_ERR_GENERIC;
|
||||
#endif
|
||||
if (ret < 0)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n",
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed with err: %d\n",
|
||||
__func__, ret);
|
||||
return NV_ERR_GENERIC;
|
||||
}
|
||||
@@ -64,6 +51,10 @@ NV_STATUS NV_API_CALL nv_gpio_get_pin_state
|
||||
*pinValue = ret;
|
||||
|
||||
return NV_OK;
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
|
||||
return NV_ERR_GENERIC;
|
||||
#endif
|
||||
}
|
||||
|
||||
void NV_API_CALL nv_gpio_set_pin_state
|
||||
@@ -73,10 +64,10 @@ void NV_API_CALL nv_gpio_set_pin_state
|
||||
NvU32 pinValue
|
||||
)
|
||||
{
|
||||
#if defined(NV_GPIO_SET_VALUE_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
#if NV_SUPPORTS_PLATFORM_DEVICE
|
||||
gpio_set_value(pinNum, pinValue);
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "gpio_set_value not present\n");
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -87,35 +78,30 @@ NV_STATUS NV_API_CALL nv_gpio_set_pin_direction
|
||||
NvU32 direction
|
||||
)
|
||||
{
|
||||
#if NV_SUPPORTS_PLATFORM_DEVICE
|
||||
int ret;
|
||||
|
||||
if (direction)
|
||||
{
|
||||
#if defined(NV_GPIO_DIRECTION_INPUT_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
ret = gpio_direction_input(pinNum);
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "gpio_direction_input not present\n");
|
||||
return NV_ERR_GENERIC;
|
||||
#endif
|
||||
}
|
||||
else
|
||||
{
|
||||
#if defined(NV_GPIO_DIRECTION_OUTPUT_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
ret = gpio_direction_output(pinNum, 0);
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "gpio_direction_output not present\n");
|
||||
return NV_ERR_GENERIC;
|
||||
#endif
|
||||
}
|
||||
|
||||
if (ret)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n",
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed with err: %d\n",
|
||||
__func__, ret);
|
||||
return NV_ERR_GENERIC;
|
||||
}
|
||||
|
||||
return NV_OK;
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
|
||||
return NV_ERR_GENERIC;
|
||||
#endif
|
||||
}
|
||||
|
||||
NV_STATUS NV_API_CALL nv_gpio_get_pin_direction
|
||||
@@ -135,7 +121,7 @@ NV_STATUS NV_API_CALL nv_gpio_get_pin_direction
|
||||
ret = nv_gpio_get_direction(pinNum);
|
||||
if (ret)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n",
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed with err: %d\n",
|
||||
__func__, ret);
|
||||
return NV_ERR_GENERIC;
|
||||
}
|
||||
@@ -152,40 +138,44 @@ NV_STATUS NV_API_CALL nv_gpio_get_pin_number
|
||||
NvU32 *pinNum
|
||||
)
|
||||
{
|
||||
#if NV_SUPPORTS_PLATFORM_DEVICE
|
||||
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
|
||||
int rc;
|
||||
|
||||
(void)nvl;
|
||||
/*!
|
||||
* @brief Mapping array of OS GPIO function ID to OS function name,
|
||||
* this name is used to get GPIO number from Device Tree.
|
||||
*/
|
||||
static const char *osMapGpioFunc[] = {
|
||||
[NV_OS_GPIO_FUNC_HOTPLUG_A] = "os_gpio_hotplug_a",
|
||||
[NV_OS_GPIO_FUNC_HOTPLUG_B] = "os_gpio_hotplug_b",
|
||||
[NV_OS_GPIO_FUNC_HOTPLUG_C] = "os_gpio_hotplug_c",
|
||||
[NV_OS_GPIO_FUNC_HOTPLUG_D] = "os_gpio_hotplug_d",
|
||||
};
|
||||
|
||||
#if defined(NV_OF_GET_NAME_GPIO_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
rc = of_get_named_gpio(nvl->dev->of_node, osMapGpioFunc[function], 0);
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "of_get_named_gpio not present\n");
|
||||
return NV_ERR_GENERIC;
|
||||
#endif
|
||||
if (rc < 0)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "of_get_name_gpio failed for gpio - %s, rc - %d\n",
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: of_get_name_gpio failed for gpio - %s, rc - %d\n",
|
||||
osMapGpioFunc[function], rc);
|
||||
return NV_ERR_GENERIC;
|
||||
}
|
||||
*pinNum = rc;
|
||||
|
||||
#if defined(NV_DEVM_GPIO_REQUEST_ONE_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
rc = devm_gpio_request_one(nvl->dev, *pinNum, NV_GPIOF_DIR_IN,
|
||||
osMapGpioFunc[function]);
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "devm_gpio_request_one not present\n");
|
||||
return NV_ERR_GENERIC;
|
||||
#endif
|
||||
if (rc < 0)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "request gpio failed for gpio - %s, rc - %d\n",
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: request gpio failed for gpio - %s, rc - %d\n",
|
||||
osMapGpioFunc[function], rc);
|
||||
return NV_ERR_GENERIC;
|
||||
}
|
||||
|
||||
return NV_OK;
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
|
||||
return NV_ERR_GENERIC;
|
||||
#endif
|
||||
}
|
||||
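
Taken together, these helpers form a small GPIO API: resolve a function ID to a device-tree-described pin once, then configure and sample it. An illustrative hedged caller is sketched below; sample_hotplug_a is not from the source:

/* Illustrative caller: resolve the hotplug-A pin and sample it once. */
static NV_STATUS sample_hotplug_a(nv_state_t *nv, NvU32 *value)
{
    NvU32 pin;
    NV_STATUS status;

    /* Looks up "os_gpio_hotplug_a" in the device tree and requests it. */
    status = nv_gpio_get_pin_number(nv, NV_OS_GPIO_FUNC_HOTPLUG_A, &pin);
    if (status != NV_OK)
        return status;

    /* Nonzero direction means input in nv_gpio_set_pin_direction(). */
    status = nv_gpio_set_pin_direction(nv, pin, 1);
    if (status != NV_OK)
        return status;

    return nv_gpio_get_pin_state(nv, pin, value);
}
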
|
||||
NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status
|
||||
@@ -195,6 +185,7 @@ NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status
|
||||
NvU32 direction
|
||||
)
|
||||
{
|
||||
#if NV_SUPPORTS_PLATFORM_DEVICE
|
||||
NvU32 irqGpioPin;
|
||||
NvU32 pinValue;
|
||||
|
||||
@@ -209,18 +200,17 @@ NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status
|
||||
return NV_FALSE;
|
||||
}
|
||||
|
||||
#if defined(NV_GPIO_GET_VALUE_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
pinValue = gpio_get_value(pinNum);
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "gpio_get_value not present\n");
|
||||
return NV_FALSE;
|
||||
#endif
|
||||
if (pinValue != direction)
|
||||
{
|
||||
return NV_FALSE;
|
||||
}
|
||||
|
||||
return NV_TRUE;
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
|
||||
return NV_FALSE;
|
||||
#endif
|
||||
}
|
||||
|
||||
NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt
|
||||
@@ -230,16 +220,12 @@ NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt
|
||||
NvU32 trigger_level
|
||||
)
|
||||
{
|
||||
#if NV_SUPPORTS_PLATFORM_DEVICE
|
||||
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
|
||||
int rc;
|
||||
int irq_num;
|
||||
|
||||
#if defined(NV_GPIO_TO_IRQ_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
irq_num = gpio_to_irq(pinNum);
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "gpio_to_irq not present\n");
|
||||
return NV_ERR_GENERIC;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Ignore setting interrupt for falling trigger for hotplug gpio pin
|
||||
@@ -261,7 +247,7 @@ NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt
|
||||
"hdmi-hotplug");
|
||||
if (rc < 0)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "IRQ registration failed for gpio - %d, rc - %d\n",
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: IRQ registration failed for gpio - %d, rc - %d\n",
|
||||
pinNum, rc);
|
||||
return NV_ERR_GENERIC;
|
||||
}
|
||||
@@ -270,4 +256,8 @@ NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt
|
||||
disable_irq_nosync(irq_num);
|
||||
|
||||
return NV_OK;
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
|
||||
return NV_ERR_GENERIC;
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -26,9 +26,11 @@
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
|
||||
#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(NV_LINUX_NVHOST_T194_H_PRESENT)
|
||||
#if defined(NV_LINUX_NVHOST_H_PRESENT)
|
||||
#include <linux/nvhost.h>
|
||||
#if defined(NV_LINUX_NVHOST_T194_H_PRESENT)
|
||||
#include <linux/nvhost_t194.h>
|
||||
#endif
|
||||
|
||||
NV_STATUS nv_get_syncpoint_aperture
|
||||
(
|
||||
@@ -42,25 +44,18 @@ NV_STATUS nv_get_syncpoint_aperture
|
||||
phys_addr_t base;
|
||||
size_t size;
|
||||
|
||||
#if NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_get_default_device
|
||||
host1x_pdev = nvhost_get_default_device();
|
||||
if (host1x_pdev == NULL)
|
||||
{
|
||||
return NV_ERR_INVALID_DEVICE;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_syncpt_unit_interface_get_aperture && \
|
||||
NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_syncpt_unit_interface_get_byte_offset
|
||||
nvhost_syncpt_unit_interface_get_aperture(
|
||||
host1x_pdev, &base, &size);
|
||||
|
||||
*physAddr = base;
|
||||
*limit = nvhost_syncpt_unit_interface_get_byte_offset(1);
|
||||
*offset = nvhost_syncpt_unit_interface_get_byte_offset(syncpointId);
|
||||
#else
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
#endif
|
||||
|
||||
return NV_OK;
|
||||
}
|
||||
|
||||
@@ -249,13 +249,13 @@ void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
|
||||
if (nvl->pci_dev != NULL)
|
||||
{
|
||||
snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name),
|
||||
"NVIDIA i2c adapter %u at %x:%02x.%u", port, nv->pci_info.bus,
|
||||
"NVIDIA i2c adapter %u at %x:%02x.%u\n", port, nv->pci_info.bus,
|
||||
nv->pci_info.slot, PCI_FUNC(nvl->pci_dev->devfn));
|
||||
}
|
||||
else
|
||||
{
|
||||
snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name),
|
||||
"NVIDIA SOC i2c adapter %u", port);
|
||||
"NVIDIA SOC i2c adapter %u\n", port);
|
||||
}
|
||||
|
||||
// add our data to the structure
|
||||
@@ -306,7 +306,7 @@ static struct i2c_client * nv_i2c_register_client(
|
||||
i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
|
||||
if (i2c_adapter == NULL)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "Unable to get i2c adapter for port(%d)",
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: Unable to get i2c adapter for port(%d)\n",
|
||||
linuxI2CSwPort);
|
||||
return NULL;
|
||||
}
|
||||
@@ -314,12 +314,12 @@ static struct i2c_client * nv_i2c_register_client(
|
||||
#if defined(NV_I2C_NEW_CLIENT_DEVICE_PRESENT)
|
||||
client = i2c_new_client_device(i2c_adapter, &i2c_dev_info);
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "nv_i2c_new_device not present\n");
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: nv_i2c_new_device not present\n");
|
||||
client = NULL;
|
||||
#endif
|
||||
if (client == NULL)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "Unable to register client for address(0x%x)",
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: Unable to register client for address(0x%x)\n",
|
||||
address);
|
||||
i2c_put_adapter(i2c_adapter);
|
||||
return NULL;
|
||||
@@ -394,7 +394,7 @@ NV_STATUS NV_API_CALL nv_i2c_transfer(
|
||||
//
|
||||
if (!(linuxI2CSwPort >= 0 && linuxI2CSwPort < MAX_TEGRA_I2C_PORTS))
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "Invalid I2C port:%d\n", linuxI2CSwPort);
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: Invalid I2C port:%d\n", linuxI2CSwPort);
|
||||
return NV_ERR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
@@ -411,7 +411,7 @@ NV_STATUS NV_API_CALL nv_i2c_transfer(
|
||||
client = nv_i2c_register_client(nv, linuxI2CSwPort, nv_msgs[count].addr);
|
||||
if (client == NULL)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "i2c client register failed for addr:0x%x\n",
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: i2c client register failed for addr:0x%x\n",
|
||||
nv_msgs[count].addr);
|
||||
return NV_ERR_GENERIC;
|
||||
}
|
||||
@@ -421,7 +421,7 @@ NV_STATUS NV_API_CALL nv_i2c_transfer(
|
||||
msgs = kzalloc((num_msgs * sizeof(*msgs)), GFP_KERNEL);
|
||||
if (msgs == NULL)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "i2c message allocation failed\n");
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: i2c message allocation failed\n");
|
||||
return NV_ERR_NO_MEMORY;
|
||||
}
|
||||
|
||||
@@ -435,7 +435,7 @@ NV_STATUS NV_API_CALL nv_i2c_transfer(
|
||||
rc = i2c_transfer(client->adapter, msgs, num_msgs);
|
||||
if (rc != num_msgs)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "i2c transfer failed for addr:0x%x",
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: i2c transfer failed for addr: 0x%x\n",
|
||||
address);
|
||||
status = NV_ERR_GENERIC;
|
||||
}
|
||||
@@ -461,11 +461,7 @@ void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
|
||||
client = (struct i2c_client *)nvl->i2c_clients[p_index].pOsClient[c_index];
|
||||
if (client)
|
||||
{
|
||||
#if defined(NV_I2C_UNREGISTER_DEVICE_PRESENT)
|
||||
i2c_unregister_device(client);
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "i2c_unregister_device not present\n");
|
||||
#endif
|
||||
nvl->i2c_clients[p_index].pOsClient[c_index] = NULL;
|
||||
}
|
||||
}
|
||||
@@ -488,7 +484,7 @@ NV_STATUS NV_API_CALL nv_i2c_bus_status(
|
||||
//
|
||||
if (!(linuxI2CSwPort >= 0 && linuxI2CSwPort < MAX_TEGRA_I2C_PORTS))
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "Invalid I2C port:%d\n", linuxI2CSwPort);
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: Invalid I2C port:%d\n", linuxI2CSwPort);
|
||||
return NV_ERR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
@@ -496,7 +492,7 @@ NV_STATUS NV_API_CALL nv_i2c_bus_status(
|
||||
i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
|
||||
if (i2c_adapter == NULL)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "Unable to get i2c adapter for port(%d)",
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: Unable to get i2c adapter for port(%d)\n",
|
||||
linuxI2CSwPort);
|
||||
return NV_ERR_GENERIC;
|
||||
}
|
||||
@@ -505,7 +501,7 @@ NV_STATUS NV_API_CALL nv_i2c_bus_status(
|
||||
ret = i2c_bus_status(i2c_adapter, scl, sda);
|
||||
if (ret < 0)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "i2c_bus_status failed:%d\n", ret);
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: i2c_bus_status failed:%d\n", ret);
|
||||
return NV_ERR_GENERIC;
|
||||
}
|
||||
i2c_put_adapter(i2c_adapter);
|
||||
@@ -531,7 +527,9 @@ void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
|
||||
#endif
|
||||
|
||||
|
||||
#if !NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
|
||||
#if !NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE || \
|
||||
(NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE && \
|
||||
(!defined(CONFIG_I2C) && !defined(CONFIG_I2C_MODULE)))
|
||||
|
||||
NV_STATUS NV_API_CALL nv_i2c_transfer(
|
||||
nv_state_t *nv,
|
||||
|
||||
@@ -26,3 +26,325 @@
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
|
||||
#if defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || IS_ENABLED(CONFIG_TEGRA_BPMP)
|
||||
#include <soc/tegra/bpmp-abi.h>
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
|
||||
#include <soc/tegra/bpmp.h>
|
||||
#elif defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT)
|
||||
#include <soc/tegra/tegra_bpmp.h>
|
||||
#endif // IS_ENABLED(CONFIG_TEGRA_BPMP)
|
||||
|
||||
#if defined NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT
|
||||
#include <dt-bindings/interconnect/tegra_icc_id.h>
|
||||
#endif
|
||||
|
||||
#ifdef NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT
|
||||
#include <linux/platform/tegra/mc_utils.h>
|
||||
#endif
|
||||
|
||||
//
|
||||
// IMP requires information from various BPMP and MC driver functions. The
|
||||
// macro below checks that all of the required functions are present.
|
||||
//
|
||||
#define IMP_SUPPORT_FUNCTIONS_PRESENT \
|
||||
(defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || \
|
||||
IS_ENABLED(CONFIG_TEGRA_BPMP)) && \
|
||||
defined(NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT)
|
||||
|
||||
//
|
||||
// Also create a macro to check if all the required ICC symbols are present.
|
||||
// DT endpoints are defined in dt-bindings/interconnect/tegra_icc_id.h.
|
||||
//
|
||||
#define ICC_SUPPORT_FUNCTIONS_PRESENT \
|
||||
defined(NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT)
|
||||
|
||||
/*!
|
||||
* @brief Returns IMP-relevant data collected from other modules
|
||||
*
|
||||
* @param[out] tegra_imp_import_data Structure to receive the data
|
||||
*
|
||||
* @returns NV_OK if successful,
|
||||
* NV_ERR_NOT_SUPPORTED if the functionality is not available.
|
||||
*/
|
||||
NV_STATUS NV_API_CALL
|
||||
nv_imp_get_import_data
|
||||
(
|
||||
TEGRA_IMP_IMPORT_DATA *tegra_imp_import_data
|
||||
)
|
||||
{
|
||||
#if IMP_SUPPORT_FUNCTIONS_PRESENT
|
||||
tegra_imp_import_data->num_dram_channels = get_dram_num_channels();
|
||||
nv_printf(NV_DBG_INFO, "NVRM: num_dram_channels = %u\n",
|
||||
tegra_imp_import_data->num_dram_channels);
|
||||
|
||||
return NV_OK;
|
||||
#else // IMP_SUPPORT_FUNCTIONS_PRESENT
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief Tells BPMP whether or not RFL is valid
|
||||
*
|
||||
* Display HW generates an ok_to_switch signal which asserts when mempool
|
||||
* occupancy is high enough to be able to turn off memory long enough to
|
||||
* execute a dramclk frequency switch without underflowing display output.
|
||||
* ok_to_switch drives the RFL ("request for latency") signal in the memory
|
||||
* unit, and the switch sequencer waits for this signal to go active before
|
||||
* starting a dramclk switch. However, if the signal is not valid (e.g., if
|
||||
* display HW or SW has not been initialized yet), the switch sequencer ignores
|
||||
* the signal. This API tells BPMP whether or not the signal is valid.
|
||||
*
|
||||
* @param[in] nv Per GPU Linux state
|
||||
* @param[in] bEnable True if RFL will be valid; false if invalid
|
||||
*
|
||||
* @returns NV_OK if successful,
|
||||
* NV_ERR_NOT_SUPPORTED if the functionality is not available, or
|
||||
* NV_ERR_GENERIC if some other kind of error occurred.
|
||||
*/
|
||||
NV_STATUS NV_API_CALL
|
||||
nv_imp_enable_disable_rfl
|
||||
(
|
||||
nv_state_t *nv,
|
||||
NvBool bEnable
|
||||
)
|
||||
{
|
||||
NV_STATUS status = NV_ERR_NOT_SUPPORTED;
|
||||
#if IMP_SUPPORT_FUNCTIONS_PRESENT
|
||||
#if IS_ENABLED(CONFIG_TEGRA_BPMP) && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
|
||||
struct tegra_bpmp *bpmp = tegra_bpmp_get(nvl->dev);
|
||||
struct tegra_bpmp_message msg;
|
||||
struct mrq_emc_disp_rfl_request emc_disp_rfl_request;
|
||||
int rc;
|
||||
|
||||
memset(&emc_disp_rfl_request, 0, sizeof(emc_disp_rfl_request));
|
||||
emc_disp_rfl_request.mode = bEnable ? EMC_DISP_RFL_MODE_ENABLED :
|
||||
EMC_DISP_RFL_MODE_DISABLED;
|
||||
msg.mrq = MRQ_EMC_DISP_RFL;
|
||||
msg.tx.data = &emc_disp_rfl_request;
|
||||
msg.tx.size = sizeof(emc_disp_rfl_request);
|
||||
msg.rx.data = NULL;
|
||||
msg.rx.size = 0;
|
||||
|
||||
rc = tegra_bpmp_transfer(bpmp, &msg);
|
||||
if (rc == 0)
|
||||
{
|
||||
nv_printf(NV_DBG_INFO,
|
||||
"\"Wait for RFL\" is %s via MRQ_EMC_DISP_RFL\n",
|
||||
bEnable ? "enabled" : "disabled");
|
||||
status = NV_OK;
|
||||
}
|
||||
else
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"MRQ_EMC_DISP_RFL failed to %s \"Wait for RFL\" (error code = %d)\n",
|
||||
bEnable ? "enable" : "disable",
|
||||
rc);
|
||||
status = NV_ERR_GENERIC;
|
||||
}
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "nv_imp_enable_disable_rfl stub called!\n");
|
||||
#endif
|
||||
#endif
|
||||
return status;
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief Obtains a handle for the display data path
|
||||
*
|
||||
* If a handle is obtained successfully, it is not returned to the caller; it
|
||||
* is saved for later use by subsequent nv_imp_icc_set_bw calls.
|
||||
* nv_imp_icc_get must be called prior to calling nv_imp_icc_set_bw.
|
||||
*
|
||||
* @param[out] nv Per GPU Linux state
|
||||
*
|
||||
* @returns NV_OK if successful,
|
||||
* NV_ERR_NOT_SUPPORTED if the functionality is not available, or
|
||||
* NV_ERR_GENERIC if some other error occurred.
|
||||
*/
|
||||
NV_STATUS NV_API_CALL
|
||||
nv_imp_icc_get
|
||||
(
|
||||
nv_state_t *nv
|
||||
)
|
||||
{
|
||||
#if ICC_SUPPORT_FUNCTIONS_PRESENT && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
|
||||
NV_STATUS status = NV_OK;
|
||||
|
||||
#if defined(NV_DEVM_ICC_GET_PRESENT)
|
||||
// Need to use the devm_of_icc_get() function as per the latest ICC driver.
|
||||
nvl->nv_imp_icc_path =
|
||||
devm_of_icc_get(nvl->dev, "read-1");
|
||||
|
||||
if (nvl->nv_imp_icc_path == NULL)
|
||||
{
|
||||
nv_printf(NV_DBG_INFO, "NVRM: devm_of_icc_get failed\n");
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
else if (!IS_ERR(nvl->nv_imp_icc_path))
|
||||
{
|
||||
nvl->is_upstream_icc_path = NV_TRUE;
|
||||
return NV_OK;
|
||||
}
|
||||
//
|
||||
// Until all DTs specify the interconnect node as required by the latest
|
||||
// ICC driver, fall back to the older ICC mechanism.
|
||||
//
|
||||
#endif
|
||||
|
||||
nvl->nv_imp_icc_path = NULL;
|
||||
|
||||
#if defined(NV_ICC_GET_PRESENT)
|
||||
struct device_node *np;
|
||||
// Check if ICC is present in the device tree, and enabled.
|
||||
np = of_find_node_by_path("/icc");
|
||||
if (np != NULL)
|
||||
{
|
||||
if (of_device_is_available(np))
|
||||
{
|
||||
// Get the ICC data path.
|
||||
nvl->nv_imp_icc_path =
|
||||
icc_get(nvl->dev, TEGRA_ICC_DISPLAY, TEGRA_ICC_PRIMARY);
|
||||
}
|
||||
of_node_put(np);
|
||||
}
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: icc_get() not present\n");
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
#endif
|
||||
|
||||
if (nvl->nv_imp_icc_path == NULL)
|
||||
{
|
||||
nv_printf(NV_DBG_INFO, "NVRM: icc_get disabled\n");
|
||||
status = NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
else if (IS_ERR(nvl->nv_imp_icc_path))
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS, "NVRM: invalid path = %ld\n",
|
||||
PTR_ERR(nvl->nv_imp_icc_path));
|
||||
nvl->nv_imp_icc_path = NULL;
|
||||
status = NV_ERR_GENERIC;
|
||||
}
|
||||
return status;
|
||||
#else
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief Releases the handle obtained by nv_imp_icc_get
|
||||
*
|
||||
* @param[in] nv Per GPU Linux state
|
||||
*/
|
||||
void
|
||||
nv_imp_icc_put
|
||||
(
|
||||
nv_state_t *nv
|
||||
)
|
||||
{
|
||||
#if ICC_SUPPORT_FUNCTIONS_PRESENT
|
||||
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
|
||||
|
||||
#if defined(NV_DEVM_ICC_GET_PRESENT)
|
||||
//
|
||||
// If the devm_of_icc_get API was used to request the path, there is
|
||||
// no need to call put explicitly.
|
||||
//
|
||||
if (nvl->is_upstream_icc_path)
|
||||
{
|
||||
goto done;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(NV_ICC_PUT_PRESENT) && NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
|
||||
if (nvl->nv_imp_icc_path != NULL)
|
||||
{
|
||||
icc_put(nvl->nv_imp_icc_path);
|
||||
}
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "icc_put() not present\n");
|
||||
#endif
|
||||
|
||||
done:
|
||||
nvl->nv_imp_icc_path = NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief Allocates a specified amount of ISO memory bandwidth for display
|
||||
*
|
||||
* floor_bw_kbps is the minimum required (i.e., floor) dramclk frequency
|
||||
* multiplied by the width of the pipe over which the display data will travel.
|
||||
* (It is understood that the bandwidth calculated by multiplying the clock
|
||||
* frequency by the pipe width will not be realistically achievable, due to
|
||||
* overhead in the memory subsystem. ICC will not actually use the bandwidth
|
||||
* value, except to reverse the calculation to get the required dramclk
|
||||
* frequency.)
|
||||
*
|
||||
* nv_imp_icc_get must be called prior to calling this function.
|
||||
*
|
||||
* @param[in] nv Per GPU Linux state
|
||||
* @param[in] avg_bw_kbps Amount of ISO memory bandwidth requested
|
||||
* @param[in] floor_bw_kbps Min required dramclk freq * pipe width
|
||||
*
|
||||
* @returns NV_OK if successful,
|
||||
* NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is too
|
||||
* high, and bandwidth cannot be allocated,
|
||||
* NV_ERR_NOT_SUPPORTED if the functionality is not available, or
|
||||
* NV_ERR_GENERIC if some other kind of error occurred.
|
||||
*/
|
||||
NV_STATUS NV_API_CALL
|
||||
nv_imp_icc_set_bw
|
||||
(
|
||||
nv_state_t *nv,
|
||||
NvU32 avg_bw_kbps,
|
||||
NvU32 floor_bw_kbps
|
||||
)
|
||||
{
|
||||
#if ICC_SUPPORT_FUNCTIONS_PRESENT && NV_SUPPORTS_PLATFORM_DEVICE
|
||||
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
|
||||
int rc;
|
||||
NV_STATUS status = NV_OK;
|
||||
|
||||
//
|
||||
// avg_bw_kbps can be either ISO bw request or NISO bw request.
|
||||
// Use floor_bw_kbps to make floor requests.
|
||||
//
|
||||
#if defined(NV_ICC_SET_BW_PRESENT)
|
||||
//
|
||||
// nv_imp_icc_path will be NULL on AV + L systems because ICC is disabled.
|
||||
// In this case, skip the allocation call, and just return a success
|
||||
// status.
|
||||
//
|
||||
if (nvl->nv_imp_icc_path == NULL)
|
||||
{
|
||||
return NV_OK;
|
||||
}
|
||||
rc = icc_set_bw(nvl->nv_imp_icc_path, avg_bw_kbps, floor_bw_kbps);
|
||||
#else
|
||||
nv_printf(NV_DBG_ERRORS, "icc_set_bw() not present\n");
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
#endif
|
||||
|
||||
if (rc < 0)
|
||||
{
|
||||
// A negative return value indicates an error.
|
||||
if (rc == -ENOMEM)
|
||||
{
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES;
|
||||
}
|
||||
else
|
||||
{
|
||||
status = NV_ERR_GENERIC;
|
||||
}
|
||||
}
|
||||
return status;
|
||||
#else
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
#endif
|
||||
}
|
||||
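
To make the floor_bw_kbps convention concrete, here is a small hedged sketch of the conversion the doc comment describes: the caller multiplies its minimum dramclk requirement by the data-pipe width, and ICC divides it back out to recover the frequency. The 32-byte pipe width and 2133-MHz floor are illustrative numbers, not values from the source:

/* Illustrative only: derive floor_bw_kbps from a dramclk floor. */
static NvU32 dramclk_floor_to_kbps(NvU64 dramclk_khz, NvU32 pipe_width_bytes)
{
    /* kHz * bytes-per-cycle = kB/s, the unit nv_imp_icc_set_bw() expects. */
    return (NvU32)(dramclk_khz * pipe_width_bytes);
}

/*
 * Example: a 2133000-kHz (2133 MHz) dramclk floor across a 32-byte pipe
 * gives 2133000 * 32 = 68256000 kB/s. The raw figure is not achievable
 * bandwidth; ICC only reverses the multiplication to recover the
 * required dramclk frequency.
 */
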
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -28,3 +28,116 @@
|
||||
|
||||
#include "dce_rm_client_ipc.h"
|
||||
|
||||
#if defined(NV_LINUX_PLATFORM_TEGRA_DCE_DCE_CLIENT_IPC_H_PRESENT)
|
||||
#include <linux/platform/tegra/dce/dce-client-ipc.h>
|
||||
|
||||
static const NvU32 dceClientRmIpcTypeMap[DCE_CLIENT_RM_IPC_TYPE_MAX] = {
|
||||
[DCE_CLIENT_RM_IPC_TYPE_SYNC] = DCE_CLIENT_IPC_TYPE_CPU_RM,
|
||||
[DCE_CLIENT_RM_IPC_TYPE_EVENT] = DCE_CLIENT_IPC_TYPE_RM_EVENT,
|
||||
};
|
||||
|
||||
static NV_STATUS validate_dce_client_ipc_interface_type(NvU32 interfaceType)
|
||||
{
|
||||
if (interfaceType >= DCE_CLIENT_RM_IPC_TYPE_MAX)
|
||||
{
|
||||
return NV_ERR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
if (dceClientRmIpcTypeMap[interfaceType] >= DCE_CLIENT_IPC_TYPE_MAX)
|
||||
{
|
||||
return NV_ERR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
return NV_OK;
|
||||
}
|
||||
|
||||
NvU32 nv_tegra_get_rm_interface_type(NvU32 clientIpcType)
|
||||
{
|
||||
NvU32 interfaceType = DCE_CLIENT_RM_IPC_TYPE_SYNC;
|
||||
|
||||
for (interfaceType = DCE_CLIENT_RM_IPC_TYPE_SYNC;
|
||||
interfaceType < DCE_CLIENT_RM_IPC_TYPE_MAX;
|
||||
interfaceType++)
|
||||
{
|
||||
if (dceClientRmIpcTypeMap[interfaceType] == clientIpcType)
|
||||
return interfaceType;
|
||||
}
|
||||
|
||||
return NV_ERR_INVALID_DATA;
|
||||
}
|
||||
|
||||
NV_STATUS nv_tegra_dce_register_ipc_client
|
||||
(
|
||||
NvU32 interfaceType,
|
||||
void *usrCtx,
|
||||
nvTegraDceClientIpcCallback callbackFn,
|
||||
NvU32 *handle
|
||||
)
|
||||
{
|
||||
NvU32 dceClientInterfaceType = DCE_CLIENT_IPC_TYPE_MAX;
|
||||
|
||||
if (validate_dce_client_ipc_interface_type(interfaceType) != NV_OK)
|
||||
{
|
||||
return NV_ERR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
dceClientInterfaceType = dceClientRmIpcTypeMap[interfaceType];
|
||||
|
||||
return tegra_dce_register_ipc_client(dceClientInterfaceType, callbackFn, usrCtx, handle);
|
||||
}
|
||||
|
||||
NV_STATUS nv_tegra_dce_client_ipc_send_recv
|
||||
(
|
||||
NvU32 clientId,
|
||||
void *msg,
|
||||
NvU32 msgLength
|
||||
)
|
||||
{
|
||||
struct dce_ipc_message dce_ipc_msg;
|
||||
|
||||
memset(&dce_ipc_msg, 0, sizeof(struct dce_ipc_message));
|
||||
dce_ipc_msg.tx.data = msg;
|
||||
dce_ipc_msg.rx.data = msg;
|
||||
dce_ipc_msg.tx.size = msgLength;
|
||||
dce_ipc_msg.rx.size = msgLength;
|
||||
|
||||
return tegra_dce_client_ipc_send_recv(clientId, &dce_ipc_msg);
|
||||
}
|
||||
|
||||
NV_STATUS nv_tegra_dce_unregister_ipc_client(NvU32 clientId)
|
||||
{
|
||||
return tegra_dce_unregister_ipc_client(clientId);
|
||||
}
|
||||
#else
|
||||
NvU32 nv_tegra_get_rm_interface_type(NvU32 clientIpcType)
|
||||
{
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
NV_STATUS nv_tegra_dce_register_ipc_client
|
||||
(
|
||||
NvU32 interfaceType,
|
||||
void *usrCtx,
|
||||
nvTegraDceClientIpcCallback callbackFn,
|
||||
NvU32 *handle
|
||||
)
|
||||
{
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
NV_STATUS nv_tegra_dce_client_ipc_send_recv
|
||||
(
|
||||
NvU32 clientId,
|
||||
void *msg,
|
||||
NvU32 msgLength
|
||||
)
|
||||
{
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
NV_STATUS nv_tegra_dce_unregister_ipc_client(NvU32 clientId)
|
||||
{
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
#endif
|
||||
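
The wrappers above reduce DCE client IPC to three calls: register, send/receive, unregister. A hedged end-to-end usage sketch follows; dce_ipc_roundtrip and rm_msg are illustrative names, not from the source:

/* Illustrative round trip through the nv_tegra DCE IPC wrappers. */
static NV_STATUS dce_ipc_roundtrip(void *rm_msg, NvU32 rm_msg_len,
                                   nvTegraDceClientIpcCallback rm_event_cb)
{
    NvU32 handle = 0;
    NV_STATUS status;

    /* Synchronous command channel: CPU-RM <-> DCE. */
    status = nv_tegra_dce_register_ipc_client(DCE_CLIENT_RM_IPC_TYPE_SYNC,
                                              NULL, rm_event_cb, &handle);
    if (status != NV_OK)
        return status;

    /* The same buffer carries the request out and the reply back. */
    status = nv_tegra_dce_client_ipc_send_recv(handle, rm_msg, rm_msg_len);

    nv_tegra_dce_unregister_ipc_client(handle);
    return status;
}
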
|
||||
|
||||
@@ -29,12 +29,7 @@
|
||||
#include <linux/completion.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#if defined(NV_LINUX_BUG_H_PRESENT)
|
||||
#include <linux/bug.h>
|
||||
#else
|
||||
#include <asm/bug.h>
|
||||
#endif
|
||||
#include <linux/bug.h>
|
||||
|
||||
// Today's implementation is a little simpler and more limited than the
|
||||
// API description allows for in nv-kthread-q.h. Details include:
|
||||
|
||||
@@ -162,7 +162,7 @@ nvidia_vma_access(
|
||||
return -EINVAL;
|
||||
|
||||
pageIndex = nv_array_index_no_speculate(pageIndex, at->num_pages);
|
||||
kernel_mapping = (void *)(at->page_table[pageIndex]->virt_addr + pageOffset);
|
||||
kernel_mapping = (void *)(at->page_table[pageIndex].virt_addr + pageOffset);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -205,15 +205,10 @@ found:
|
||||
}
|
||||
|
||||
static vm_fault_t nvidia_fault(
|
||||
#if !defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
|
||||
struct vm_area_struct *vma,
|
||||
#endif
|
||||
struct vm_fault *vmf
|
||||
)
|
||||
{
|
||||
#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
|
||||
struct vm_area_struct *vma = vmf->vma;
|
||||
#endif
|
||||
nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
|
||||
nv_linux_state_t *nvl = nvlfp->nvptr;
|
||||
nv_state_t *nv = NV_STATE_PTR(nvl);

@@ -394,7 +389,7 @@ static int nvidia_mmap_peer_io(

    BUG_ON(!at->flags.contig);

    start = at->page_table[page_index]->phys_addr;
    start = at->page_table[page_index].phys_addr;
    size = pages * PAGE_SIZE;

    ret = nv_io_remap_page_range(vma, start, size, vma->vm_start);
@@ -420,13 +415,22 @@ static int nvidia_mmap_sysmem(
        {
            j = nv_array_index_no_speculate(j, (page_index + pages));

            //
            // nv_remap_page_range() maps a contiguous physical address range
            // into the user virtual address space.
            // Use the PFN-based mapping API to create the mapping for
            // reserved carveout (OS-invisible memory, not managed by the OS) too.
            // nv_remap_page_range() works for all kinds of memory regions:
            // imported buffers can come from OS-managed or non-OS-managed
            // regions (reserved carveout), and it handles all of them.
            //
            if (
#if defined(NV_VGPU_KVM_BUILD)
                at->flags.guest ||
#endif
                at->flags.carveout)
                at->flags.carveout || at->import_sgt)
            {
                ret = nv_remap_page_range(vma, start, at->page_table[j]->phys_addr,
                ret = nv_remap_page_range(vma, start, at->page_table[j].phys_addr,
                                          PAGE_SIZE, vma->vm_page_prot);
            }
            else
@@ -435,12 +439,14 @@ static int nvidia_mmap_sysmem(
                vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot);

                ret = vm_insert_page(vma, start,
                                     NV_GET_PAGE_STRUCT(at->page_table[j]->phys_addr));
                                     NV_GET_PAGE_STRUCT(at->page_table[j].phys_addr));
            }

            if (ret)
            {
                NV_ATOMIC_DEC(at->usage_count);
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: Userspace mapping creation failed [%d]!\n", ret);
                return -EAGAIN;
            }
            start += PAGE_SIZE;
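
The comment above distinguishes two mapping paths. As a rough sketch of the underlying kernel primitives (not the driver's internal nv_remap_page_range()), a PFN-based mapping handles memory without struct pages, while vm_insert_page() requires page-backed memory; example_map_one_page is hypothetical:

/* Illustrative sketch of the two mapping primitives contrasted above. */
static int example_map_one_page(struct vm_area_struct *vma,
                                unsigned long uaddr, u64 phys_addr,
                                bool page_backed)
{
    if (!page_backed)
    {
        /* Works for reserved carveout / OS-invisible memory: maps a raw
         * PFN, no struct page needed. */
        return remap_pfn_range(vma, uaddr,
                               (unsigned long)(phys_addr >> PAGE_SHIFT),
                               PAGE_SIZE, vma->vm_page_prot);
    }

    /* Requires a valid struct page; lets the core VM track the mapping. */
    return vm_insert_page(vma, uaddr,
                          pfn_to_page((unsigned long)(phys_addr >> PAGE_SHIFT)));
}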

@@ -113,13 +113,6 @@ static inline void nv_jiffy_timer_callback_typed_data(struct timer_list *timer)

    nv_nstimer->nv_nano_timer_callback(nv_nstimer);
}

static inline void nv_jiffy_timer_callback_anon_data(unsigned long arg)
{
    struct nv_nano_timer *nv_nstimer = (struct nv_nano_timer *)arg;

    nv_nstimer->nv_nano_timer_callback(nv_nstimer);
}
#endif

/*!
@@ -158,13 +151,7 @@ void NV_API_CALL nv_create_nano_timer(
    nv_nstimer->hr_timer.function = nv_nano_timer_callback_typed_data;
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_hrtimer_setup
#else
#if defined(NV_TIMER_SETUP_PRESENT)
    timer_setup(&nv_nstimer->jiffy_timer, nv_jiffy_timer_callback_typed_data, 0);
#else
    init_timer(&nv_nstimer->jiffy_timer);
    nv_nstimer->jiffy_timer.function = nv_jiffy_timer_callback_anon_data;
    nv_nstimer->jiffy_timer.data = (unsigned long)nv_nstimer;
#endif // NV_TIMER_SETUP_PRESENT
#endif // NV_NANO_TIMER_USE_HRTIMER

    *pnv_nstimer = nv_nstimer;
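
The fallback chain above exists because timer_setup() superseded init_timer() plus open-coded function/data fields (v4.15). A minimal sketch of the modern idiom with from_timer() recovering the containing object; the example_ names are hypothetical:

/* Illustrative sketch of the timer_setup()/from_timer() idiom. */
struct example_timer {
    struct timer_list t;
    int payload;
};

static void example_timer_cb(struct timer_list *t)
{
    /* from_timer() is container_of() specialized for timers. */
    struct example_timer *et = from_timer(et, t, t);
    (void)et->payload;   /* callback recovers its context from the timer */
}

static void example_timer_start(struct example_timer *et)
{
    timer_setup(&et->t, example_timer_cb, 0);
    mod_timer(&et->t, jiffies + msecs_to_jiffies(10));
}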

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2011-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2011-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -415,6 +415,7 @@ static int nv_p2p_get_pages(
    NvU8 *gpu_uuid = NULL;
    NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0};
    NvBool force_pcie = !!(flags & NVIDIA_P2P_FLAGS_FORCE_BAR1_MAPPING);
    NvBool cpu_cacheable;
    int rc;

    if (!NV_IS_ALIGNED64(virtual_address, NVRM_P2P_PAGESIZE_BIG_64K) ||
@@ -520,7 +521,7 @@ static int nv_p2p_get_pages(
                                              &mem_info->private,
                                              physical_addresses, &entries,
                                              force_pcie, *page_table, gpu_info,
                                              &mem_info->mig_info);
                                              &mem_info->mig_info, &cpu_cacheable);
        if (status != NV_OK)
        {
            goto failed;
@@ -531,7 +532,7 @@ static int nv_p2p_get_pages(
        // Get regular old-style, non-persistent mappings
        status = rm_p2p_get_pages(sp, p2p_token, va_space,
                virtual_address, length, physical_addresses, wreqmb_h,
                rreqmb_h, &entries, &gpu_uuid, *page_table);
                rreqmb_h, &entries, &gpu_uuid, *page_table, &cpu_cacheable);
        if (status != NV_OK)
        {
            goto failed;
@@ -578,6 +579,11 @@ static int nv_p2p_get_pages(

    (*page_table)->page_size = page_size_index;

    if (cpu_cacheable)
    {
        (*page_table)->flags |= NVIDIA_P2P_PAGE_TABLE_FLAGS_CPU_CACHEABLE;
    }

    os_free_mem(physical_addresses);
    physical_addresses = NULL;


@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2011-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -132,11 +132,16 @@ struct nvidia_p2p_page {
    } registers;
} nvidia_p2p_page_t;

#define NVIDIA_P2P_PAGE_TABLE_VERSION   0x00010002
#define NVIDIA_P2P_PAGE_TABLE_VERSION   0x00020000

#define NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(p) \
    NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_PAGE_TABLE_VERSION)

/*
 * Page Table Flags
 */
#define NVIDIA_P2P_PAGE_TABLE_FLAGS_CPU_CACHEABLE 0x1

typedef
struct nvidia_p2p_page_table {
    uint32_t version;
@@ -144,6 +149,7 @@ struct nvidia_p2p_page_table {
    struct nvidia_p2p_page **pages;
    uint32_t entries;
    uint8_t *gpu_uuid;
    uint32_t flags;
} nvidia_p2p_page_table_t;
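
Since the hunk above bumps the page-table version's major half (0x0001xxxx to 0x0002xxxx) and appends a flags word, a third-party consumer would gate on version compatibility before reading the new field. A hedged sketch; example_table_is_cpu_cacheable is hypothetical:

/* Illustrative sketch of a third-party consumer checking the new field. */
static bool example_table_is_cpu_cacheable(const nvidia_p2p_page_table_t *pt)
{
    /* Reject tables from a different major version before touching
     * fields added in this revision. */
    if (!NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(pt))
        return false;

    return (pt->flags & NVIDIA_P2P_PAGE_TABLE_FLAGS_CPU_CACHEABLE) != 0;
}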

/*
@@ -153,6 +159,9 @@ struct nvidia_p2p_page_table {
 *
 * This API only supports pinned, GPU-resident memory, such as that provided
 * by cudaMalloc().
 * This API does not support Coherent Driver-based Memory Management (CDMM) mode.
 * CDMM allows coherent GPU memory to be managed by the driver and not the OS.
 * This is done by the driver not onlining the memory as a NUMA node.
 *
 * This API may sleep.
 *
@@ -201,7 +210,7 @@ int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space,
 * accessible to a third-party device. The pages will persist until
 * explicitly freed by nvidia_p2p_put_pages_persistent().
 *
 * Persistent GPU memory mappings are not supported on PowerPC,
 * Persistent GPU memory mappings are not supported on
 * MIG-enabled devices and vGPU.
 *
 * This API only supports pinned, GPU-resident memory, such as that provided

@@ -61,8 +61,8 @@ static inline void nv_disable_caches(unsigned long *cr4)
    unsigned long cr0 = read_cr0();
    write_cr0(((cr0 & (0xdfffffff)) | 0x40000000));
    wbinvd();
    *cr4 = NV_READ_CR4();
    if (*cr4 & 0x80) NV_WRITE_CR4(*cr4 & ~0x80);
    *cr4 = __read_cr4();
    if (*cr4 & 0x80) __write_cr4(*cr4 & ~0x80);
    __flush_tlb();
}

@@ -72,7 +72,7 @@ static inline void nv_enable_caches(unsigned long cr4)
    wbinvd();
    __flush_tlb();
    write_cr0((cr0 & 0x9fffffff));
    if (cr4 & 0x80) NV_WRITE_CR4(cr4);
    if (cr4 & 0x80) __write_cr4(cr4);
}
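
For reference, the magic constants in the two helpers above correspond to architectural bits: 0x40000000 is CR0.CD (cache disable), the 0xdfffffff mask clears CR0.NW (not write-through), and 0x80 is CR4.PGE (page global enable), toggled to force a full TLB flush. A hedged restatement with named constants; the EX_ names are invented:

/* Illustrative sketch naming the architectural bits used above. */
#define EX_CR0_CD   0x40000000UL  /* cache disable */
#define EX_CR0_NW   0x20000000UL  /* not write-through */
#define EX_CR4_PGE  0x00000080UL  /* page global enable */

static inline void example_disable_caches(unsigned long *saved_cr4)
{
    unsigned long cr0 = read_cr0();

    /* Set CD and clear NW: caching disabled but still coherent. */
    write_cr0((cr0 & ~EX_CR0_NW) | EX_CR0_CD);
    wbinvd();

    /* Clearing PGE invalidates global TLB entries as a side effect. */
    *saved_cr4 = __read_cr4();
    if (*saved_cr4 & EX_CR4_PGE)
        __write_cr4(*saved_cr4 & ~EX_CR4_PGE);
}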

static void nv_setup_pat_entries(void *info)
@@ -122,29 +122,6 @@ static void nv_restore_pat_entries(void *info)
    NV_RESTORE_FLAGS(eflags);
}

/*
 * NOTE 1:
 * Functions register_cpu_notifier(), unregister_cpu_notifier(),
 * macros register_hotcpu_notifier, unregister_hotcpu_notifier,
 * and CPU states CPU_DOWN_FAILED, CPU_DOWN_PREPARE
 * were removed by the following commit:
 * 2016 Dec 25: b272f732f888d4cf43c943a40c9aaa836f9b7431
 *
 * NV_REGISTER_CPU_NOTIFIER_PRESENT is true when
 * register_cpu_notifier() is present.
 *
 * The functions cpuhp_setup_state() and cpuhp_remove_state() should be
 * used as an alternative to the register_cpu_notifier() and
 * unregister_cpu_notifier() functions. The following
 * commit introduced these functions as well as the enum cpuhp_state.
 * 2016 Feb 26: 5b7aa87e0482be768486e0c2277aa4122487eb9d
 *
 * NV_CPUHP_SETUP_STATE_PRESENT is true when cpuhp_setup_state() is present.
 *
 * For kernels where both cpuhp_setup_state() and register_cpu_notifier()
 * are present, we still use register_cpu_notifier().
 */
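
As NOTE 1 says, the cpuhp state machine replaces the old notifiers on modern kernels. A minimal sketch of registering a dynamic online/offline pair with cpuhp_setup_state(); the example_ handlers are hypothetical:

/* Illustrative sketch of the cpuhp replacement for CPU notifiers. */
static int example_cpu_online(unsigned int cpu)
{
    return 0;   /* per-CPU setup, e.g. reprogram PAT on this CPU */
}

static int example_cpu_offline(unsigned int cpu)
{
    return 0;   /* per-CPU teardown */
}

static enum cpuhp_state example_state;

static int example_register(void)
{
    int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "nvidia/example:online",
                                example_cpu_online, example_cpu_offline);
    if (ret < 0)
        return ret;          /* negative errno on failure */

    example_state = ret;     /* dynamic state id, needed for removal */
    return 0;
}

static void example_unregister(void)
{
    cpuhp_remove_state(example_state);
}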

static int
nvidia_cpu_teardown(unsigned int cpu)
{
@@ -229,37 +206,13 @@ nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu
    return NOTIFY_OK;
}

/*
 * See NOTE 1.
 * In order to avoid warnings for unused variables when compiling against
 * kernel versions which include the changes of commit id
 * b272f732f888d4cf43c943a40c9aaa836f9b7431, we have to protect the declaration
 * of nv_hotcpu_nfb with #if.
 *
 * NV_REGISTER_CPU_NOTIFIER_PRESENT is checked before
 * NV_CPUHP_SETUP_STATE_PRESENT to avoid compilation warnings for the unused
 * variable nvidia_pat_online on kernels where both
 * NV_REGISTER_CPU_NOTIFIER_PRESENT and NV_CPUHP_SETUP_STATE_PRESENT
 * are true.
 */
#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
static struct notifier_block nv_hotcpu_nfb = {
    .notifier_call = nvidia_cpu_callback,
    .priority = 0
};
#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
static enum cpuhp_state nvidia_pat_online;
#endif

static int
nvidia_register_cpu_hotplug_notifier(void)
{
    int ret;
    /* See NOTE 1 */
#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
    /* register_hotcpu_notifier() returns 0 on success or -ENOENT on failure */
    ret = register_hotcpu_notifier(&nv_hotcpu_nfb);
#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)

    /*
     * cpuhp_setup_state() returns a positive number on success when the state
     * is CPUHP_AP_ONLINE_DYN. On failure, it returns a negative number.
@@ -283,26 +236,6 @@ nvidia_register_cpu_hotplug_notifier(void)
    {
        nvidia_pat_online = ret;
    }
#else

    /*
     * This function should be a no-op for kernels which
     * - do not have CONFIG_HOTPLUG_CPU enabled,
     * - do not have PAT support,
     * - do not have the cpuhp_setup_state() function.
     *
     * On such kernels, returning an error here would result in module init
     * failure. Hence, return 0 here.
     */
    if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
    {
        ret = 0;
    }
    else
    {
        ret = -EIO;
    }
#endif

    if (ret < 0)
    {
@@ -317,13 +250,7 @@ nvidia_register_cpu_hotplug_notifier(void)
static void
nvidia_unregister_cpu_hotplug_notifier(void)
{
    /* See NOTE 1 */
#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
    unregister_hotcpu_notifier(&nv_hotcpu_nfb);
#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
    cpuhp_remove_state(nvidia_pat_online);
#endif
    return;
}


File diff suppressed because it is too large
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -24,10 +24,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#if defined NV_LINUX_OF_DEVICE_H_PRESENT
#include <linux/of_device.h>
#endif

#include "nv-platform.h"
#include "nv-linux.h"
@@ -340,7 +337,6 @@ void nv_soc_free_irqs(nv_state_t *nv)
    {
        nv_soc_free_irq_by_type(nv, NV_SOC_IRQ_TCPC2DISP_TYPE);
    }

}

static void nv_platform_free_device_dpaux(nv_state_t *nv)
@@ -374,7 +370,6 @@ static void nv_platform_free_device_dpaux(nv_state_t *nv)
static int nv_platform_alloc_device_dpaux(struct platform_device *plat_dev, nv_state_t *nv)
{
#if NV_SUPPORTS_PLATFORM_DEVICE
    static const size_t MAX_LENGTH = 10;
    const char *sdpaux = "dpaux";
    int dpauxindex = 0;
    int irq = 0;
@@ -429,7 +424,7 @@ static int nv_platform_alloc_device_dpaux(struct platform_device *plat_dev, nv_s

    for (dpauxindex = 0; dpauxindex < nv->num_dpaux_instance; dpauxindex++)
    {
        char sdpaux_device[MAX_LENGTH];
        char sdpaux_device[10];
        snprintf(sdpaux_device, sizeof(sdpaux_device), "%s%d", sdpaux, dpauxindex);

        NV_KMALLOC(nv->dpaux[dpauxindex], sizeof(*(nv->dpaux[dpauxindex])));
@@ -514,6 +509,25 @@ NV_STATUS NV_API_CALL nv_soc_device_reset(nv_state_t *nv)
            }
        }

        if (nvl->hdacodec_reset != NULL)
        {
            /*
             * HDACODEC reset control is shared between the display driver and
             * the audio driver. Since reset_control_reset() toggles the reset
             * signal, we prefer reset_control_deassert(). Additionally, the
             * audio driver uses reset_control_bulk_deassert(), which
             * internally calls reset_control_deassert(), and consumers must
             * not use reset_control_reset() on shared reset lines once
             * reset_control_deassert() has been used.
             */
            rc = reset_control_deassert(nvl->hdacodec_reset);
            if (rc != 0)
            {
                status = NV_ERR_GENERIC;
                nv_printf(NV_DBG_ERRORS, "NVRM: hdacodec reset_control_deassert failed, rc: %d\n", rc);
                goto out;
            }
        }
    }
out:
    return status;
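
The comment in the hunk above rests on the shared-versus-exclusive reset-control contract. A minimal sketch of the shared-line idiom with a hypothetical consumer, reusing the "hdacodec_reset" id from this file:

/* Illustrative sketch of the shared reset-line contract described above. */
static int example_deassert_shared_reset(struct device *dev)
{
    struct reset_control *rst;

    /* A shared handle refuses reset_control_reset(); only balanced
     * assert/deassert calls are allowed on the shared line. */
    rst = devm_reset_control_get_shared(dev, "hdacodec_reset");
    if (IS_ERR(rst))
        return PTR_ERR(rst);

    /* Deassert is refcounted: the first deassert releases the line, and it
     * is only re-asserted once every sharing consumer has asserted again,
     * so this cannot glitch the other driver. */
    return reset_control_deassert(rst);
}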

@@ -558,69 +572,94 @@ static NV_STATUS nv_platform_get_iommu_availability(struct platform_device *plat
    struct device_node *niso_np_with_iommus = NULL;
    struct device_node *niso_np = NULL;
    struct device_node *iso_np = NULL;
    NvU32 value = 0;
    NV_STATUS status = NV_OK;
    bool single_smmu = NV_TRUE;

    nv->iommus.iso_iommu_present = NV_FALSE;
    nv->iommus.niso_iommu_present = NV_FALSE;

    /* NV_U32_MAX is used to indicate that the platform does not support SMMU */
    nv->iommus.dispIsoStreamId = NV_U32_MAX;
    nv->iommus.dispNisoStreamId = NV_U32_MAX;

    iso_np = of_parse_phandle(np, "iommus", 0);
    if (iso_np && of_device_is_available(iso_np)) {

        /* Parse ISO StreamID. Second entry in iommu property has Stream ID */
        if (of_property_read_u32_index(np, "iommus", 1, &value))
        {
            nv_printf(NV_DBG_ERRORS, "NVRM: nv_platform_get_iommu_availability, failed to parse ISO StreamID\n");
            status = NV_ERR_GENERIC;
            goto free_iso_np;
        }
        /* LSB 8 bits represent the Stream ID */
        nv->iommus.dispIsoStreamId = (value & 0xFF);
        nv->iommus.iso_iommu_present = NV_TRUE;
    }

    single_smmu = of_property_read_bool(np, "single_stage_iso_smmu");
    nv->iommus.iso_iommu_present = !single_smmu;

    niso_np = of_get_child_by_name(np, "nvdisplay-niso");
    if (niso_np) {
        niso_np_with_iommus = of_parse_phandle(niso_np, "iommus", 0);
        if (niso_np_with_iommus && of_device_is_available(niso_np_with_iommus)) {
            nv->iommus.niso_iommu_present = NV_TRUE;

            /* Parse NISO StreamID. Second entry in iommu property has Stream ID */
            if (of_property_read_u32_index(niso_np, "iommus", 1, &value))
            {
                nv_printf(NV_DBG_ERRORS, "NVRM: nv_platform_get_iommu_availability, failed to parse NISO StreamID\n");
                status = NV_ERR_GENERIC;
                goto free_niso_np;
            }
            /* LSB 8 bits represent the Stream ID */
            nv->iommus.dispNisoStreamId = (value & 0xFF);
        }
    }

free_niso_np:
    if (niso_np_with_iommus)
        of_node_put(niso_np_with_iommus);

    if (niso_np)
        of_node_put(niso_np);

free_iso_np:
    if (iso_np)
        of_node_put(iso_np);

    return status;
}

#define DISP_DT_SMMU_STREAM_ID_MASK 0xFF
// This function gets called only for Tegra
static NV_STATUS nv_platform_get_iso_niso_stream_ids(struct platform_device *plat_dev,
                                                     nv_state_t *nv)
{
    struct device_node *np = plat_dev->dev.of_node;
    NvU32 value = 0;
    NV_STATUS status = NV_OK;
    int ret = 0;

    /* NV_U32_MAX is used to indicate that the platform does not support SMMU */
    nv->iommus.dispIsoStreamId = NV_U32_MAX;
    nv->iommus.dispNisoStreamId = NV_U32_MAX;

    /* Parse ISO StreamID */
    ret = of_property_read_u32(np, "iso_sid", &value);
    if (ret == 0)
    {
        nv->iommus.dispIsoStreamId = (value & DISP_DT_SMMU_STREAM_ID_MASK);
    }
    else if (ret == -EINVAL)
    {
        /* iso_sid will not be specified in device tree if SMMU needs to be bypassed. Continue without failing */
        nv_printf(NV_DBG_INFO, "NVRM: nv_platform_get_iso_niso_stream_ids, iso_sid not specified under display node\n");
    }
    else
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: nv_platform_get_iso_niso_stream_ids, iso_sid has invalid value\n");
        status = NV_ERR_GENERIC;
        goto fail;
    }

    /* Parse NISO StreamID */
    ret = of_property_read_u32(np, "niso_sid", &value);
    if (ret == 0)
    {
        nv->iommus.dispNisoStreamId = (value & DISP_DT_SMMU_STREAM_ID_MASK);
    }
    else if (ret == -EINVAL)
    {
        /* niso_sid will not be specified in device tree if SMMU needs to be bypassed. Continue without failing */
        nv_printf(NV_DBG_INFO, "NVRM: nv_platform_get_iso_niso_stream_ids, niso_sid not specified under display node\n");
    }
    else
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: nv_platform_get_iso_niso_stream_ids, niso_sid has invalid value\n");
        status = NV_ERR_GENERIC;
        goto fail;
    }

fail:
    return status;
}

static int nv_platform_register_mapping_devs(struct platform_device *plat_dev,
                                             nv_state_t *nv)
{
#if NV_SUPPORTS_PLATFORM_DEVICE
    struct device_node *np = plat_dev->dev.of_node;
    struct device_node *niso_np = NULL;
    struct platform_device *niso_plat_dev = NULL;
@@ -636,12 +675,7 @@ static int nv_platform_register_mapping_devs(struct platform_device *plat_dev,
        goto register_mapping_devs_end;
    }

#if defined(NV_DEVM_OF_PLATFORM_POPULATE_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
    rc = devm_of_platform_populate(&plat_dev->dev);
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: devm_of_platform_populate not present\n");
    rc = -ENOSYS;
#endif
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: devm_of_platform_populate failed\n");
@@ -656,23 +690,13 @@ static int nv_platform_register_mapping_devs(struct platform_device *plat_dev,
        goto register_mapping_devs_end;
    }

#if defined(NV_OF_DMA_CONFIGURE_PRESENT)
#if defined(NV_OF_DMA_CONFIGURE_HAS_INT_RETURN_TYPE)
    rc = of_dma_configure(
#else
    rc = 0;
    of_dma_configure(
#endif
                          &niso_plat_dev->dev,
                          niso_np
#if NV_OF_DMA_CONFIGURE_ARGUMENT_COUNT > 2
                          , true
#endif
                          );
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: of_dma_configure not present\n");
    rc = -ENOSYS;
#endif
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: nv_of_dma_configure failed for niso\n");
@@ -687,6 +711,9 @@ static int nv_platform_register_mapping_devs(struct platform_device *plat_dev,
register_mapping_devs_end:
    of_node_put(niso_np);
    return rc;
#else
    return -ENOSYS;
#endif
}

static int nv_platform_parse_dcb(struct platform_device *plat_dev,
@@ -694,13 +721,8 @@
{
    int ret;

#if defined(NV_OF_PROPERTY_COUNT_ELEMS_OF_SIZE_PRESENT)
    struct device_node *np = plat_dev->dev.of_node;
    ret = of_property_count_elems_of_size(np, "nvidia,dcb-image", sizeof(u8));
#else
    nv_printf(NV_DBG_ERRORS, "of_property_count_elems_of_size not present\n");
    return -ENOSYS;
#endif
    if (ret > 0)
    {
        nv->soc_dcb_size = ret;
@@ -713,13 +735,8 @@ static int nv_platform_parse_dcb(struct platform_device *plat_dev,
        }
    }

#if defined(NV_OF_PROPERTY_READ_VARIABLE_U8_ARRAY_PRESENT)
    ret = of_property_read_variable_u8_array(np, "nvidia,dcb-image",
                                             nv->soc_dcb_blob, 0, nv->soc_dcb_size);
#else
    nv_printf(NV_DBG_ERRORS, "of_property_read_variable_u8_array not present\n");
    ret = -ENOSYS;
#endif
    if (ret < 0)
    {
        nv_printf(NV_DBG_ERRORS, "failed to read dcb blob");
@@ -821,7 +838,7 @@ static int nv_platform_device_display_probe(struct platform_device *plat_dev)

    nv->os_state = (void *) nvl;

    // Check ISO/NISO SMMU status and parse StreamIDs
    // Check ISO/NISO SMMU status
    status = nv_platform_get_iommu_availability(plat_dev, nv);
    if (status != NV_OK)
    {
@@ -829,6 +846,14 @@ static int nv_platform_device_display_probe(struct platform_device *plat_dev)
        goto err_release_mem_region_regs;
    }

    // Parse ISO/NISO SMMU StreamIDs
    status = nv_platform_get_iso_niso_stream_ids(plat_dev, nv);
    if (status != NV_OK)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: nv_platform_device_display_probe: parsing ISO/NISO StreamIDs failed\n");
        goto err_release_mem_region_regs;
    }

    rc = nv_platform_register_mapping_devs(plat_dev, nv);
    if (rc != 0)
    {
@@ -848,7 +873,7 @@ static int nv_platform_device_display_probe(struct platform_device *plat_dev)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate hdacodecregs memory\n");
        rc = -ENOMEM;
        goto err_release_mem_region_regs;
        goto err_remove_dpaux_device;
    }
    os_mem_set(nv->hdacodec_regs, 0, sizeof(*(nv->hdacodec_regs)));

@@ -873,6 +898,7 @@ static int nv_platform_device_display_probe(struct platform_device *plat_dev)
    nv->hdacodec_regs->cpu_address = res_addr;
    nv->hdacodec_regs->size = res_size;
    nv->soc_is_dpalt_mode_supported = false;
    nv->soc_is_hfrp_supported = false;

    nv->hdacodec_irq = platform_get_irq_byname(plat_dev, "hdacodec");
    if (nv->hdacodec_irq < 0)
@@ -897,6 +923,12 @@ static int nv_platform_device_display_probe(struct platform_device *plat_dev)
        }
    }

    rc = of_property_read_bool(nvl->dev->of_node, "nvidia,hfrp-supported");
    if (rc == true)
    {
        nv->soc_is_hfrp_supported = true;
    }

    NV_KMALLOC(nv->mipical_regs, sizeof(*(nv->mipical_regs)));
    if (nv->mipical_regs == NULL)
    {
@@ -996,6 +1028,25 @@ static int nv_platform_device_display_probe(struct platform_device *plat_dev)
            nvl->mipi_cal_reset = NULL;
        }

        /*
         * In T23x, HDACODEC is part of the same power domain as NVDisplay, so
         * unpowergating the DISP domain also results in the HDACODEC reset
         * being de-asserted. However, in T26x, HDACODEC is being moved out to
         * a separate always-on domain, so we need to explicitly de-assert the
         * HDACODEC reset in RM. We don't have a good way to differentiate
         * between T23x and T26x here, so if reading "hdacodec_reset" from the
         * DT fails, silently ignore it for now. In the long term we should
         * look into using the devm_reset_control_bulk* APIs, and see whether
         * that is feasible if we're ultimately just getting and
         * asserting/de-asserting all of the resets specified in the DT
         * together all of the time, with no scenario in which we need to use
         * only a specific subset of the resets at a given point.
         */
        nvl->hdacodec_reset = devm_reset_control_get(nvl->dev, "hdacodec_reset");
        if (IS_ERR(nvl->hdacodec_reset))
        {
            nvl->hdacodec_reset = NULL;
        }
    }

    status = nv_imp_icc_get(nv);
@@ -1098,21 +1149,21 @@ static int nv_platform_device_display_probe(struct platform_device *plat_dev)
     * TODO: procfs, vt_switch, dynamic_power_management
     */

    nv_kmem_cache_free_stack(sp);

    dma_set_mask(nv->dma_dev->dev, DMA_BIT_MASK(39));
#if defined(NV_DMA_SET_MASK_AND_COHERENT_PRESENT)
    if (nv->niso_dma_dev != NULL)
    {
        dma_set_mask_and_coherent(nv->niso_dma_dev->dev, DMA_BIT_MASK(39));
    }
#else
    nv_printf(NV_DBG_INFO, "NVRM: Using default 32-bit DMA mask\n");
#endif

    rc = os_alloc_mutex(&nvl->soc_bh_mutex);
    if (rc != 0)
    {
        goto err_remove_device;
    }

    return rc;
    nv_kmem_cache_free_stack(sp);

    return 0;

err_remove_device:
    LOCK_NV_LINUX_DEVICES();
@@ -1127,24 +1178,27 @@ err_destroy_lock:
err_put_icc_handle:
    nv_imp_icc_put(nv);
err_destroy_clk_handles:
    nv_clk_clear_handles(nv);
err_remove_dpaux_device:
    nv_platform_free_device_dpaux(nv);
    if (!skip_clk_rsts)
    {
        nv_clk_clear_handles(nv);
    }
err_release_mem_mipical_region_regs:
    release_mem_region(nv->mipical_regs->cpu_address, nv->mipical_regs->size);
err_free_mipical_regs:
    NV_KFREE(nv->mipical_regs, sizeof(*(nv->mipical_regs)));
err_release_mem_hdacodec_region_regs:
    release_mem_region(nv->hdacodec_regs->cpu_address, nv->hdacodec_regs->size);
err_release_mem_region_regs:
    release_mem_region(nv->regs->cpu_address, nv->regs->size);
err_free_nv_codec_regs:
    NV_KFREE(nv->hdacodec_regs, sizeof(*(nv->hdacodec_regs)));
err_remove_dpaux_device:
    nv_platform_free_device_dpaux(nv);
err_release_mem_region_regs:
    release_mem_region(nv->regs->cpu_address, nv->regs->size);
err_free_nv_regs:
    NV_KFREE(nv->regs, sizeof(*(nv->regs)));
err_free_nvl:
    NV_KFREE(nvl, sizeof(*nvl));
    platform_set_drvdata(plat_dev, NULL);
    NV_KFREE(nvl, sizeof(*nvl));
err_free_stack:
    nv_kmem_cache_free_stack(sp);

@@ -1253,40 +1307,18 @@ static int nv_platform_device_probe(struct platform_device *plat_dev)
{
    int rc = 0;

    if (plat_dev->dev.of_node)
    {
    {
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
        rc = nv_platform_device_display_probe(plat_dev);
        rc = nv_platform_device_display_probe(plat_dev);
#endif
    }
    }
    else
    {
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
        rc = nv_platform_device_display_probe(plat_dev);
#endif
    }

    return rc;
}

static void nv_platform_device_remove(struct platform_device *plat_dev)
{
    if (plat_dev->dev.of_node)
    {
    {
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
        nv_platform_device_display_remove(plat_dev);
        nv_platform_device_display_remove(plat_dev);
#endif
    }
    }
    else
    {
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
        nv_platform_device_display_remove(plat_dev);
#endif
    }
}

#if defined(NV_PLATFORM_DRIVER_STRUCT_REMOVE_RETURNS_VOID) /* Linux v6.11 */
@@ -1306,6 +1338,7 @@ static int nv_platform_device_remove_wrapper(struct platform_device *pdev)
const struct of_device_id nv_platform_device_table[] =
{
    { .compatible = "nvidia,tegra234-display",},
    { .compatible = "nvidia,tegra264-display",},
    {},
};
MODULE_DEVICE_TABLE(of, nv_platform_device_table);
@@ -1332,6 +1365,11 @@ int nv_platform_count_devices(void)
    int count = 0;
    struct device_node *np = NULL;

    if (NVreg_RegisterPlatformDeviceDriver == 0)
    {
        return 0;
    }

    while ((np = of_find_matching_node(np, nv_platform_device_table)))
    {
        count++;
@@ -1343,7 +1381,12 @@ int nv_platform_count_devices(void)
int nv_platform_register_driver(void)
{
#if NV_SUPPORTS_PLATFORM_DEVICE
    return platform_driver_register(&nv_platform_driver);
    if (NVreg_RegisterPlatformDeviceDriver > 0)
    {
        return platform_driver_register(&nv_platform_driver);
    }

    return 0;
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: Not registering platform driver\n");
    return -1;
@@ -1353,7 +1396,10 @@ int nv_platform_register_driver(void)
void nv_platform_unregister_driver(void)
{
#if NV_SUPPORTS_PLATFORM_DEVICE
    platform_driver_unregister(&nv_platform_driver);
    if (NVreg_RegisterPlatformDeviceDriver > 0)
    {
        platform_driver_unregister(&nv_platform_driver);
    }
#endif
}

@@ -1363,13 +1409,87 @@ unsigned int NV_API_CALL nv_soc_fuse_register_read (unsigned int addr)
{
    unsigned int data = 0;

#if NV_IS_EXPORT_SYMBOL_PRESENT_tegra_fuse_control_read
#if NV_SUPPORTS_PLATFORM_DEVICE && NV_IS_EXPORT_SYMBOL_PRESENT_tegra_fuse_control_read
    tegra_fuse_control_read ((unsigned long)(addr), &data);
#endif

    return data;
}

#if NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_send_cmd
extern int tsec_comms_send_cmd(void* cmd, unsigned int queue_id, nv_soc_tsec_cb_func_t cb_func, void* cb_context);
unsigned int NV_API_CALL nv_soc_tsec_send_cmd(void* cmd, nv_soc_tsec_cb_func_t cb_func, void* cb_context)
{
    return (unsigned int)tsec_comms_send_cmd(cmd, 0, cb_func, cb_context);
}
#else
unsigned int NV_API_CALL nv_soc_tsec_send_cmd(void* cmd, nv_soc_tsec_cb_func_t cb_func, void* cb_context)
{
    return (unsigned int)NV_ERR_NOT_SUPPORTED;
}
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_send_cmd

#if NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_set_init_cb
extern int tsec_comms_set_init_cb(nv_soc_tsec_cb_func_t cb_func, void* cb_context);
unsigned int NV_API_CALL nv_soc_tsec_event_register(nv_soc_tsec_cb_func_t cb_func, void* cb_context, NvBool is_init_event)
{
    if (is_init_event)
    {
        return (unsigned int)tsec_comms_set_init_cb(cb_func, cb_context);
    }
    else
    {
        // TODO: Add DeInit Event support for TSEC if required
        return 0;
    }
}
#else
unsigned int NV_API_CALL nv_soc_tsec_event_register(nv_soc_tsec_cb_func_t cb_func, void* cb_context, NvBool is_init_event)
{
    return (unsigned int)NV_ERR_NOT_SUPPORTED;
}
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_set_init_cb

#if NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_clear_init_cb
extern void tsec_comms_clear_init_cb(void);
unsigned int NV_API_CALL nv_soc_tsec_event_unregister(NvBool is_init_event)
{
    if (is_init_event)
    {
        tsec_comms_clear_init_cb();
        return 0;
    }
    else
    {
        // TODO: Add DeInit Event support for TSEC if required
        return 0;
    }
}
#else
unsigned int NV_API_CALL nv_soc_tsec_event_unregister(NvBool is_init_event)
{
    return (unsigned int)NV_ERR_NOT_SUPPORTED;
}
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_clear_init_cb

void* NV_API_CALL nv_soc_tsec_alloc_mem_desc(NvU32 num_bytes, NvU32 *flcn_addr)
{
#if NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_alloc_mem_from_gscco
    extern void *tsec_comms_alloc_mem_from_gscco(u32 size_in_bytes, u32 *gscco_offset);
    return tsec_comms_alloc_mem_from_gscco(num_bytes, flcn_addr);
#else
    return NULL;
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_alloc_mem_from_gscco
}

void NV_API_CALL nv_soc_tsec_free_mem_desc(void *mem_desc)
{
#if NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_free_gscco_mem
    extern void tsec_comms_free_gscco_mem(void *page_va);
    tsec_comms_free_gscco_mem(mem_desc);
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_free_gscco_mem
}

NV_STATUS nv_get_valid_window_head_mask(nv_state_t *nv, NvU64 *window_head_mask)
{
#if NV_SUPPORTS_PLATFORM_DEVICE

@@ -54,6 +54,7 @@ static struct proc_dir_entry *proc_nvidia_warnings;
static struct proc_dir_entry *proc_nvidia_patches;
static struct proc_dir_entry *proc_nvidia_gpus;

extern char *NVreg_CoherentGPUMemoryMode;
extern char *NVreg_RegistryDwords;
extern char *NVreg_RegistryDwordsPerDevice;
extern char *NVreg_RmMsg;
@@ -429,6 +430,8 @@ nv_procfs_read_params(
    for (i = 0; (entry = &nv_parms[i])->name != NULL; i++)
        seq_printf(s, "%s: %u\n", entry->name, *entry->data);

    seq_printf(s, "CoherentGPUMemoryMode: \"%s\"\n",
               (NVreg_CoherentGPUMemoryMode != NULL) ? NVreg_CoherentGPUMemoryMode : "");
    seq_printf(s, "RegistryDwords: \"%s\"\n",
               (NVreg_RegistryDwords != NULL) ? NVreg_RegistryDwords : "");
    seq_printf(s, "RegistryDwordsPerDevice: \"%s\"\n",

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2006-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2006-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -450,6 +450,26 @@
#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement
#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT)

/*
 * Option: CoherentGPUMemoryMode
 *
 * Description:
 *
 * This option can be set to control how GPU memory is accessed through
 * the coherent link.
 *
 * This option has no effect on platforms that do not support onlining
 * device memory to a NUMA node.
 *
 * Possible string values:
 *
 * "driver"          : disable onlining coherent memory to the OS as a NUMA
 *                     node; the driver will manage it in this case
 * "numa" (or unset) : enable onlining coherent memory to the OS as a NUMA
 *                     node (default)
 */
#define __NV_COHERENT_GPU_MEMORY_MODE CoherentGPUMemoryMode
#define NV_REG_COHERENT_GPU_MEMORY_MODE NV_REG_STRING(__NV_COHERENT_GPU_MEMORY_MODE)
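
A sketch of how a string-valued key like this is typically consumed; example_coherent_mode_is_driver_managed is hypothetical and only mirrors the semantics documented above:

/* Illustrative sketch: interpret the documented string values. */
static bool example_coherent_mode_is_driver_managed(const char *mode)
{
    /* "numa" or unset selects NUMA onlining (the default); only an
     * explicit "driver" disables it. */
    return (mode != NULL) && (strcmp(mode, "driver") == 0);
}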

/*
 * Option: GpuBlacklist
 *
@@ -659,6 +679,23 @@
#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver
#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER)

/*
 * Option: RegisterPlatformDeviceDriver
 *
 * Description:
 *
 * When this option is enabled, the NVIDIA driver will register with the
 * platform subsystem.
 *
 * Possible values:
 *
 * 1 - register as platform driver (default)
 * 0 - do not register as platform driver
 */

#define __NV_REGISTER_PLATFORM_DEVICE_DRIVER RegisterPlatformDeviceDriver
#define NV_REG_REGISTER_PLATFORM_DEVICE_DRIVER NV_REG_STRING(__NV_REGISTER_PLATFORM_DEVICE_DRIVER)

/*
 * Option: EnablePCIERelaxedOrderingMode
 *
@@ -894,20 +931,19 @@
 *
 * Description:
 *
 * This option is applicable only on coherent systems with BAR1 enabled to allow
 * maximum bandwidth between GPU and a third party device over a dedicated
 * PCIe link instead of over C2C for GPUDirect RDMA use-cases.
 * Such a config is only supported for a specific topology which is checked by
 * This option allows users to override the PCI topology validation enforced by
 * the GPU driver's dma-buf and nv-p2p subsystems.
 *
 * This option allows the user to override the driver's topology check.
 *
 * Possible values:
 * 0 - Do not override topology check (default).
 * 1 - Override topology check.
 * 0 - Use the driver's topology check to allow or deny access (default).
 * 1 - Override the driver's topology check to allow access.
 * 2 - Override the driver's topology check to deny access.
 */
#define __NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE GrdmaPciTopoCheckOverride
#define NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE NV_REG_STRING(__NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE)
#define NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DEFAULT 0
#define NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_ALLOW_ACCESS 1
#define NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DENY_ACCESS 2
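
A sketch of how a three-valued override like this can be folded into an access decision; example_grdma_access_allowed is hypothetical and only mirrors the documented semantics:

/* Illustrative sketch: combine the override key with the topology check. */
static bool example_grdma_access_allowed(bool topo_check_passed, NvU32 override)
{
    switch (override)
    {
        case NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_ALLOW_ACCESS:
            return true;
        case NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DENY_ACCESS:
            return false;
        case NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DEFAULT:
        default:
            return topo_check_passed;   /* fall back to the driver's check */
    }
}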

#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)

@@ -944,10 +980,12 @@ NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PLATFORM_DEVICE_DRIVER, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_RESIZABLE_BAR, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_NONBLOCKING_OPEN, 1);

NV_DEFINE_REG_STRING_ENTRY(__NV_COHERENT_GPU_MEMORY_MODE, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL);
@@ -959,7 +997,8 @@ NV_DEFINE_REG_STRING_ENTRY(__NV_RM_NVLINK_BW, NULL);
NV_DEFINE_REG_ENTRY(__NV_RM_NVLINK_BW_LINK_COUNT, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IMEX_CHANNEL_COUNT, 2048);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_CREATE_IMEX_CHANNEL_0, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE,
                           NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DEFAULT);

/*
 *----------------registry database definition----------------------

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -116,21 +116,20 @@ NV_STATUS NV_API_CALL nv_alloc_user_mapping(
    nv_alloc_t *at = pAllocPrivate;

    if (at->flags.contig)
        *pUserAddress = (at->page_table[0]->phys_addr + (pageIndex * PAGE_SIZE) + pageOffset);
        *pUserAddress = (at->page_table[0].phys_addr + (pageIndex * PAGE_SIZE) + pageOffset);
    else
        *pUserAddress = (at->page_table[pageIndex]->phys_addr + pageOffset);
        *pUserAddress = (at->page_table[pageIndex].phys_addr + pageOffset);

    return NV_OK;
}

NV_STATUS NV_API_CALL nv_free_user_mapping(
void NV_API_CALL nv_free_user_mapping(
    nv_state_t *nv,
    void *pAllocPrivate,
    NvU64 userAddress,
    void *pPrivate
)
{
    return NV_OK;
}

/*

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -187,7 +187,7 @@ static inline void nv_set_memory_type(nv_alloc_t *at, NvU32 type)
    {
        for (i = 0; i < at->num_pages; i++)
        {
            page_ptr = at->page_table[i];
            page_ptr = &at->page_table[i];
            page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
            pages[i] = (unsigned long)page_address(page);
@@ -211,7 +211,7 @@ static inline void nv_set_memory_type(nv_alloc_t *at, NvU32 type)
    else
    {
        for (i = 0; i < at->num_pages; i++)
            nv_set_contig_memory_type(at->page_table[i], 1, type);
            nv_set_contig_memory_type(&at->page_table[i], 1, type);
    }
}

@@ -322,7 +322,7 @@ static NV_STATUS nv_alloc_coherent_pages(

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];
        page_ptr = &at->page_table[i];

        page_ptr->virt_addr = virt_addr + i * PAGE_SIZE;
        page_ptr->phys_addr = virt_to_phys((void *)page_ptr->virt_addr);
@@ -330,9 +330,7 @@ static NV_STATUS nv_alloc_coherent_pages(

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_UNCACHED);
        nv_set_contig_memory_type(&at->page_table[0], at->num_pages, NV_MEMORY_UNCACHED);
    }

    at->flags.coherent = NV_TRUE;
@@ -346,13 +344,11 @@ static void nv_free_coherent_pages(
    nvidia_pte_t *page_ptr;
    struct device *dev = at->dev;

    page_ptr = at->page_table[0];
    page_ptr = &at->page_table[0];

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_WRITEBACK);
        nv_set_contig_memory_type(page_ptr, at->num_pages, NV_MEMORY_WRITEBACK);
    }

    dma_free_coherent(dev, at->num_pages * PAGE_SIZE,
@@ -366,7 +362,7 @@ NV_STATUS nv_alloc_contig_pages(
{
    NV_STATUS status;
    nvidia_pte_t *page_ptr;
    NvU32 i, j;
    NvU32 i;
    unsigned int gfp_mask;
    unsigned long virt_addr = 0;
    NvU64 phys_addr;
@@ -431,18 +427,14 @@ NV_STATUS nv_alloc_contig_pages(
            goto failed;
        }

        page_ptr = at->page_table[i];
        page_ptr = &at->page_table[i];
        page_ptr->phys_addr = phys_addr;
        page_ptr->virt_addr = virt_addr;

        NV_MAYBE_RESERVE_PAGE(page_ptr);
    }

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_UNCACHED);
        nv_set_contig_memory_type(&at->page_table[0], at->num_pages, NV_MEMORY_UNCACHED);
    }

    at->flags.coherent = NV_FALSE;
@@ -450,13 +442,7 @@ NV_STATUS nv_alloc_contig_pages(
    return NV_OK;

failed:
    if (i > 0)
    {
        for (j = 0; j < i; j++)
            NV_MAYBE_UNRESERVE_PAGE(at->page_table[j]);
    }

    page_ptr = at->page_table[0];
    page_ptr = &at->page_table[0];

    // For unprotected sysmem in CC, memory is marked as unencrypted during allocation.
    // NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
@@ -472,7 +458,6 @@ void nv_free_contig_pages(
)
{
    nvidia_pte_t *page_ptr;
    unsigned int i;

    nv_printf(NV_DBG_MEMINFO,
              "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);
@@ -482,19 +467,10 @@ void nv_free_contig_pages(

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_WRITEBACK);
        nv_set_contig_memory_type(&at->page_table[0], at->num_pages, NV_MEMORY_WRITEBACK);
    }

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        NV_MAYBE_UNRESERVE_PAGE(page_ptr);
    }

    page_ptr = at->page_table[0];
    page_ptr = &at->page_table[0];

    // For unprotected sysmem in CC, memory is marked as unencrypted during allocation.
    // NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
@@ -596,11 +572,10 @@ NV_STATUS nv_alloc_system_pages(
    }
#endif

            page_ptr = at->page_table[base_page_idx];
            page_ptr = &at->page_table[base_page_idx];
            page_ptr->phys_addr = phys_addr;
            page_ptr->virt_addr = sub_page_virt_addr;

            NV_MAYBE_RESERVE_PAGE(page_ptr);
            sub_page_offset += PAGE_SIZE;
        }
    }
@@ -615,8 +590,7 @@ failed:
    {
        for (j = 0; j < i; j++)
        {
            page_ptr = at->page_table[j * os_pages_in_page];
            NV_MAYBE_UNRESERVE_PAGE(page_ptr);
            page_ptr = &at->page_table[j * os_pages_in_page];

            // For unprotected sysmem in CC, memory is marked as unencrypted during allocation.
            // NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
@@ -645,16 +619,9 @@ void nv_free_system_pages(
    if (at->cache_type != NV_MEMORY_CACHED)
        nv_set_memory_type(at, NV_MEMORY_WRITEBACK);

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        NV_MAYBE_UNRESERVE_PAGE(page_ptr);
    }

    for (i = 0; i < at->num_pages; i += os_pages_in_page)
    {
        page_ptr = at->page_table[i];
        page_ptr = &at->page_table[i];

        // For unprotected sysmem in CC, memory is marked as unencrypted during allocation.
        // NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
@@ -670,21 +637,18 @@ static NvUPtr nv_vmap(struct page **pages, NvU32 page_count,
    void *ptr;
    pgprot_t prot = PAGE_KERNEL;
#if defined(NVCPU_X86_64)
#if defined(PAGE_KERNEL_NOENC)
    if (unencrypted)
    {
        prot = cached ? nv_adjust_pgprot(PAGE_KERNEL_NOENC) :
                        nv_adjust_pgprot(NV_PAGE_KERNEL_NOCACHE_NOENC);
    }
    else
#endif
    {
        prot = cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE;
    }
#elif defined(NVCPU_AARCH64)
    prot = cached ? PAGE_KERNEL : NV_PGPROT_UNCACHED(PAGE_KERNEL);
#endif
    /* All memory cached in PPC64LE; can't honor 'cached' input. */
    ptr = vmap(pages, page_count, VM_MAP, prot);
    NV_MEMDBG_ADD(ptr, page_count * PAGE_SIZE);
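
The prot selection above feeds vmap(). A minimal sketch of the same pattern reduced to its core, assuming pgprot_noncached() is available on the target architecture; example_vmap_uncached is hypothetical:

/* Illustrative sketch of the vmap() pattern above: pick a pgprot_t,
 * then map the page array into a contiguous kernel VA range. */
static void *example_vmap_uncached(struct page **pages, unsigned int count)
{
    pgprot_t prot = pgprot_noncached(PAGE_KERNEL);

    /* VM_MAP marks the area as a vmap of caller-provided pages. */
    return vmap(pages, count, VM_MAP, prot);
}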


@@ -71,9 +71,7 @@

#include <asm/cache.h>

#if defined(NV_SOUND_HDAUDIO_H_PRESENT)
#include "sound/hdaudio.h"
#endif

#if defined(NV_SOUND_HDA_CODEC_H_PRESENT)
#include <sound/core.h>
@@ -91,6 +89,9 @@

#include <linux/ioport.h>

#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>

#if defined(NV_LINUX_CC_PLATFORM_H_PRESENT)
#include <linux/cc_platform.h>
#endif
@@ -99,12 +100,9 @@
#include <asm/mshyperv.h>
#endif

#if defined(NV_ASM_CPUFEATURE_H_PRESENT)
#include <asm/cpufeature.h>
#endif

#include "conftest/patches.h"

#include "detect-self-hosted.h"

#define RM_THRESHOLD_TOTAL_IRQ_COUNT 100000
@@ -181,7 +179,6 @@ struct semaphore nv_system_power_state_lock;
#endif

void *nvidia_p2p_page_t_cache;
static void *nvidia_pte_t_cache;
void *nvidia_stack_t_cache;
static nvidia_stack_t *__nv_init_sp;

@@ -210,7 +207,7 @@ NvBool nv_ats_supported;
 ***/

/* nvos_ functions.. do not take a state device parameter */
static int nvos_count_devices(void);
static int nvos_count_devices(int *, int *);

static nv_alloc_t *nvos_create_alloc(struct device *, NvU64);
static int nvos_free_alloc(nv_alloc_t *);
@@ -226,7 +223,6 @@ static int nvidia_ctl_close (struct inode *, struct file *);

const char *nv_device_name = MODULE_NAME;
static const char *nvidia_stack_cache_name = MODULE_NAME "_stack_cache";
static const char *nvidia_pte_cache_name = MODULE_NAME "_pte_cache";
static const char *nvidia_p2p_page_cache_name = MODULE_NAME "_p2p_page_cache";

static int nvidia_open (struct inode *, struct file *);
@@ -382,8 +378,15 @@ nv_alloc_t *nvos_create_alloc(
)
{
    nv_alloc_t *at;
    NvU64 pt_size;
    unsigned int i;
    NvU64 pt_size = num_pages * sizeof(nvidia_pte_t);

    // Sanity check inputs
    if ((num_pages != 0) && ((pt_size / num_pages) != sizeof(nvidia_pte_t)))
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: Invalid page table allocation - Number of pages exceeds max value.\n");
        return NULL;
    }

    NV_KZALLOC(at, sizeof(nv_alloc_t));
    if (at == NULL)
@@ -392,50 +395,27 @@ nv_alloc_t *nvos_create_alloc(
        return NULL;
    }

    at->dev = dev;
    pt_size = num_pages * sizeof(nvidia_pte_t *);
    //
    // Check for multiplication overflow and check whether num_pages value can fit in at->num_pages.
    //
    if ((num_pages != 0) && ((pt_size / num_pages) != sizeof(nvidia_pte_t*)))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Invalid page table allocation - Number of pages exceeds max value.\n");
        NV_KFREE(at, sizeof(nv_alloc_t));
        return NULL;
    }

    // at->num_pages is an unsigned int, check that the requested page count fits
    at->num_pages = num_pages;
    if (at->num_pages != num_pages)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Invalid page table allocation - requested size overflows.\n");
        nv_printf(NV_DBG_ERRORS,
            "NVRM: Invalid page table allocation - requested size overflows.\n");
        NV_KFREE(at, sizeof(nv_alloc_t));
        return NULL;
    }

    if (os_alloc_mem((void **)&at->page_table, pt_size) != NV_OK)
    at->page_table = kvzalloc(pt_size, NV_GFP_KERNEL);
    if (at->page_table == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate page table\n");
        NV_KFREE(at, sizeof(nv_alloc_t));
        return NULL;
    }

    memset(at->page_table, 0, pt_size);
    NV_ATOMIC_SET(at->usage_count, 0);

    for (i = 0; i < at->num_pages; i++)
    {
        at->page_table[i] = NV_KMEM_CACHE_ALLOC(nvidia_pte_t_cache);
        if (at->page_table[i] == NULL)
        {
            nv_printf(NV_DBG_ERRORS,
                      "NVRM: failed to allocate page table entry\n");
            nvos_free_alloc(at);
            return NULL;
        }
        memset(at->page_table[i], 0, sizeof(nvidia_pte_t));
    }

    at->pid = os_get_current_process();
    at->dev = dev;

    return at;
}
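
The division-based guard above is the standard way to detect multiplication overflow without a wider intermediate type. A self-contained restatement of the idiom; example_alloc_size is hypothetical:

#include <stddef.h>

/* Illustrative sketch: detect overflow in count * elem_size by checking
 * that the division undoes the multiplication exactly. */
static int example_alloc_size(size_t count, size_t elem_size, size_t *out)
{
    size_t total = count * elem_size;

    if (count != 0 && total / count != elem_size)
        return -1;   /* overflowed */

    *out = total;
    return 0;
}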

@@ -445,20 +425,13 @@ int nvos_free_alloc(
    nv_alloc_t *at
)
{
    unsigned int i;

    if (at == NULL)
        return -1;

    if (NV_ATOMIC_READ(at->usage_count))
        return 1;

    for (i = 0; i < at->num_pages; i++)
    {
        if (at->page_table[i] != NULL)
            NV_KMEM_CACHE_FREE(at->page_table[i], nvidia_pte_t_cache);
    }
    os_free_mem(at->page_table);
    kvfree(at->page_table);

    NV_KFREE(at, sizeof(nv_alloc_t));

@@ -471,7 +444,6 @@ nv_module_resources_exit(nv_stack_t *sp)
    nv_kmem_cache_free_stack(sp);

    NV_KMEM_CACHE_DESTROY(nvidia_p2p_page_t_cache);
    NV_KMEM_CACHE_DESTROY(nvidia_pte_t_cache);
    NV_KMEM_CACHE_DESTROY(nvidia_stack_t_cache);
}

@@ -489,15 +461,6 @@ nv_module_resources_init(nv_stack_t **sp)
        goto exit;
    }

    nvidia_pte_t_cache = NV_KMEM_CACHE_CREATE(nvidia_pte_cache_name,
                                              nvidia_pte_t);
    if (nvidia_pte_t_cache == NULL)
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: nvidia_pte_t cache allocation failed.\n");
        goto exit;
    }

    nvidia_p2p_page_t_cache = NV_KMEM_CACHE_CREATE(nvidia_p2p_page_cache_name,
                                                   nvidia_p2p_page_t);
    if (nvidia_p2p_page_t_cache == NULL)
@@ -519,7 +482,6 @@ exit:
        nv_kmem_cache_free_stack(*sp);

        NV_KMEM_CACHE_DESTROY(nvidia_p2p_page_t_cache);
        NV_KMEM_CACHE_DESTROY(nvidia_pte_t_cache);
        NV_KMEM_CACHE_DESTROY(nvidia_stack_t_cache);
    }

@@ -533,10 +495,6 @@ nvlink_drivers_exit(void)
    nvswitch_exit();
#endif

#if defined(NVCPU_PPC64LE)
    ibmnpu_exit();
#endif

    nvlink_core_exit();
}

@@ -552,24 +510,11 @@ nvlink_drivers_init(void)
        return rc;
    }

#if defined(NVCPU_PPC64LE)
    rc = ibmnpu_init();
    if (rc < 0)
    {
        nv_printf(NV_DBG_INFO, "NVRM: IBM NPU init failed.\n");
        nvlink_core_exit();
        return rc;
    }
#endif

#if NVCPU_IS_64_BITS
    rc = nvswitch_init();
    if (rc < 0)
    {
        nv_printf(NV_DBG_INFO, "NVRM: NVSwitch init failed.\n");
#if defined(NVCPU_PPC64LE)
        ibmnpu_exit();
#endif
        nvlink_core_exit();
    }
#endif
@@ -653,20 +598,6 @@ nv_registry_keys_init(nv_stack_t *sp)
    nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device);
    NvU32 data;

    /*
     * Determine whether we should allow user-mode NUMA onlining of device
     * memory.
     */
    if (NVCPU_IS_PPC64LE)
    {
        if (NVreg_EnableUserNUMAManagement)
        {
            /* Force on the core RM registry key to match. */
            status = rm_write_registry_dword(sp, nv, "RMNumaOnlining", 1);
            WARN_ON(status != NV_OK);
        }
    }

    status = rm_read_registry_dword(sp, nv, NV_DMA_REMAP_PEER_MMIO, &data);
    if (status == NV_OK)
    {
@@ -893,8 +824,10 @@ static int __init nvidia_init_module(void)
{
    int rc;
    NvU32 count;
    NvBool warn_unprobed = NV_FALSE;
    nvidia_stack_t *sp = NULL;
    const NvBool is_nvswitch_present = os_is_nvswitch_present();
    int num_pci_devices = 0, num_platform_devices = 0;

    nv_memdbg_init();

@@ -926,7 +859,7 @@ static int __init nvidia_init_module(void)
        goto caps_imex_exit;
    }

    count = nvos_count_devices();
    count = nvos_count_devices(&num_pci_devices, &num_platform_devices);
    if ((count == 0) && (!is_nvswitch_present))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA GPU found.\n");
@@ -940,7 +873,24 @@ static int __init nvidia_init_module(void)
        goto module_exit;
    }

    if (num_probed_nv_devices != count)
    warn_unprobed = (num_probed_nv_devices != count);
    WARN_ON(num_probed_nv_devices > count);

    if (num_platform_devices > 0 &&
        !NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE)
    {
        // RM was configured for tegra display but some conftests failed
        nv_printf(NV_DBG_WARNINGS,
            "NVRM: Failed to probe Tegra Display platform device.\n");
        nv_printf(NV_DBG_WARNINGS,
            "NVRM: This kernel is not compatible with Tegra Display.\n");

        // Warn if any PCI GPUs weren't probed
        if (count > num_probed_nv_devices)
            warn_unprobed = (count - num_probed_nv_devices != num_platform_devices);
    }

    if (warn_unprobed)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: The NVIDIA probe routine was not called for %d device(s).\n",
@@ -1340,6 +1290,12 @@ static int validate_numa_start_state(nv_linux_state_t *nvl)
    return rc;
}

NV_STATUS NV_API_CALL nv_get_num_dpaux_instances(nv_state_t *nv, NvU32 *num_instances)
{
    *num_instances = nv->num_dpaux_instance;
    return NV_OK;
}

void NV_API_CALL
nv_schedule_uvm_isr(nv_state_t *nv)
{
@@ -1447,8 +1403,7 @@ static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)
#endif

    if (((!(nv->flags & NV_FLAG_USES_MSI)) && (!(nv->flags & NV_FLAG_USES_MSIX)))
        && (nv->interrupt_line == 0) && !(nv->flags & NV_FLAG_SOC_DISPLAY)
        && !(nv->flags & NV_FLAG_SOC_IGPU))
        && (nv->interrupt_line == 0) && !(nv->flags & NV_FLAG_SOC_DISPLAY))
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
            "No interrupts of any type are available. Cannot use this GPU.\n");
@@ -1461,6 +1416,7 @@ static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)
    {
        if (nv->flags & NV_FLAG_SOC_DISPLAY)
        {
            rc = nv_soc_register_irqs(nv);
        }
        else if (!(nv->flags & NV_FLAG_USES_MSIX))
        {
@@ -1519,13 +1475,13 @@ static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)
    if (!rm_init_adapter(sp, nv))
    {
        if (!(nv->flags & NV_FLAG_USES_MSIX) &&
            !(nv->flags & NV_FLAG_SOC_DISPLAY) &&
            !(nv->flags & NV_FLAG_SOC_IGPU))
            !(nv->flags & NV_FLAG_SOC_DISPLAY))
        {
            free_irq(nv->interrupt_line, (void *) nvl);
        }
        else if (nv->flags & NV_FLAG_SOC_DISPLAY)
        {
            nv_soc_free_irqs(nv);
        }
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
        else
@@ -1616,6 +1572,16 @@ failed:
        nvl->isr_bh_unlocked_mutex = NULL;
    }

    if (nv->flags & NV_FLAG_TRIGGER_FLR)
    {
        if (nvl->pci_dev)
        {
            nv_printf(NV_DBG_INFO, "NVRM: Trigger FLR on Failure!\n");
            os_pci_trigger_flr((void *)nvl->pci_dev);
        }
        nv->flags &= ~NV_FLAG_TRIGGER_FLR;
    }
|
||||
|
||||
nv_dev_free_stacks(nvl);
|
||||
|
||||
if (power_ref)
|
||||
@@ -1978,21 +1944,10 @@ failed:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void validate_numa_shutdown_state(nv_linux_state_t *nvl)
|
||||
{
|
||||
int numa_status = nv_get_numa_status(nvl);
|
||||
WARN_ON((numa_status != NV_IOCTL_NUMA_STATUS_OFFLINE) &&
|
||||
(numa_status != NV_IOCTL_NUMA_STATUS_DISABLED));
|
||||
}
|
||||
|
||||
void nv_shutdown_adapter(nvidia_stack_t *sp,
|
||||
nv_state_t *nv,
|
||||
nv_linux_state_t *nvl)
|
||||
{
|
||||
#if defined(NVCPU_PPC64LE)
|
||||
validate_numa_shutdown_state(nvl);
|
||||
#endif
|
||||
|
||||
rm_disable_adapter(sp, nv);
|
||||
|
||||
// It's safe to call nv_kthread_q_stop even if queue is not initialized
|
||||
@@ -2011,8 +1966,7 @@ void nv_shutdown_adapter(nvidia_stack_t *sp,
|
||||
}
|
||||
|
||||
if (!(nv->flags & NV_FLAG_USES_MSIX) &&
|
||||
!(nv->flags & NV_FLAG_SOC_DISPLAY) &&
|
||||
!(nv->flags & NV_FLAG_SOC_IGPU))
|
||||
!(nv->flags & NV_FLAG_SOC_DISPLAY))
|
||||
{
|
||||
free_irq(nv->interrupt_line, (void *)nvl);
|
||||
if (nv->flags & NV_FLAG_USES_MSI)
|
||||
@@ -2024,6 +1978,7 @@ void nv_shutdown_adapter(nvidia_stack_t *sp,
|
||||
}
|
||||
else if (nv->flags & NV_FLAG_SOC_DISPLAY)
|
||||
{
|
||||
nv_soc_free_irqs(nv);
|
||||
}
|
||||
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
|
||||
else
|
||||
@@ -2223,12 +2178,10 @@ nvidia_close_callback(
|
||||
{
|
||||
up(&nvl->ldata_lock);
|
||||
|
||||
#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE)
|
||||
if (bRemove)
|
||||
{
|
||||
NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(nvl->pci_dev);
|
||||
pci_stop_and_remove_bus_device(nvl->pci_dev);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
nv_kmem_cache_free_stack(sp);
|
||||
@@ -2910,7 +2863,7 @@ nvidia_isr(
|
||||
|
||||
if(rm_handled == NV_FALSE)
|
||||
{
|
||||
os_get_current_time(&sec, &usec);
|
||||
os_get_system_time(&sec, &usec);
|
||||
currentTime = ((NvU64)sec) * 1000000 + (NvU64)usec;
|
||||
|
||||
/* Reset unhandled count if it's been more than 0.1 seconds since the last unhandled IRQ */
|
||||
@@ -3220,7 +3173,7 @@ nv_map_guest_pages(nv_alloc_t *at,
|
||||
|
||||
for (j = 0; j < page_count; j++)
|
||||
{
|
||||
pages[j] = NV_GET_PAGE_STRUCT(at->page_table[page_idx+j]->phys_addr);
|
||||
pages[j] = NV_GET_PAGE_STRUCT(at->page_table[page_idx+j].phys_addr);
|
||||
}
|
||||
|
||||
virt_addr = nv_vm_map_pages(pages, page_count,
|
||||
@@ -3275,7 +3228,7 @@ nv_alias_pages(
|
||||
|
||||
for (i=0; i < at->num_pages; ++i)
|
||||
{
|
||||
page_ptr = at->page_table[i];
|
||||
page_ptr = &at->page_table[i];
|
||||
|
||||
if (contiguous && i>0)
|
||||
{
|
||||
@@ -3334,7 +3287,7 @@ NV_STATUS NV_API_CALL nv_register_peer_io_mem(
|
||||
|
||||
for (i = 0; i < page_count; i++)
|
||||
{
|
||||
at->page_table[i]->phys_addr = addr;
|
||||
at->page_table[i].phys_addr = addr;
|
||||
addr += PAGE_SIZE;
|
||||
}
|
||||
|
||||
@@ -3379,7 +3332,6 @@ NV_STATUS NV_API_CALL nv_register_user_pages(
|
||||
NvU64 i;
|
||||
struct page **user_pages;
|
||||
nv_linux_state_t *nvl;
|
||||
nvidia_pte_t *page_ptr;
|
||||
|
||||
nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_register_user_pages: 0x%" NvU64_fmtx"\n", page_count);
|
||||
user_pages = *priv_data;
|
||||
@@ -3414,10 +3366,7 @@ NV_STATUS NV_API_CALL nv_register_user_pages(
|
||||
* We only assign the physical address and not the DMA address, since
|
||||
* this allocation hasn't been DMA-mapped yet.
|
||||
*/
|
||||
page_ptr = at->page_table[i];
|
||||
page_ptr->phys_addr = page_to_phys(user_pages[i]);
|
||||
|
||||
phys_addr[i] = page_ptr->phys_addr;
|
||||
at->page_table[i].phys_addr = phys_addr[i] = page_to_phys(user_pages[i]);
|
||||
}
|
||||
|
||||
/* Save off the user pages array to be restored later */
|
||||
@@ -3498,7 +3447,7 @@ NV_STATUS NV_API_CALL nv_register_phys_pages(
|
||||
|
||||
for (i = 0; i < page_count; i++)
|
||||
{
|
||||
at->page_table[i]->phys_addr = phys_addr[i];
|
||||
at->page_table[i].phys_addr = phys_addr[i];
|
||||
}
|
||||
|
||||
at->user_pages = NULL;
|
||||
@@ -3516,7 +3465,8 @@ NV_STATUS NV_API_CALL nv_register_sgt(
|
||||
NvU32 cache_type,
|
||||
void **priv_data,
|
||||
struct sg_table *import_sgt,
|
||||
void *import_priv
|
||||
void *import_priv,
|
||||
NvBool is_peer_mmio
|
||||
)
|
||||
{
|
||||
nv_alloc_t *at;
|
||||
@@ -3531,23 +3481,70 @@ NV_STATUS NV_API_CALL nv_register_sgt(
|
||||
if (at == NULL)
|
||||
return NV_ERR_NO_MEMORY;
|
||||
|
||||
/* Populate phys addrs with DMA addrs from SGT */
|
||||
for_each_sg(import_sgt->sgl, sg, import_sgt->nents, i)
|
||||
//
|
||||
// TODO: When ISO SMMU is not present, dma mapping of imported ISO memory
|
||||
// causes crash during __clean_dcache_area_poc. dma mapping of ISO
|
||||
// memory allocated by RM (via __get_free_pages) still works.
|
||||
// Skip dma mapping of imported ISO memory to unblock Tegra Display in
|
||||
// AV+L. Bug 200765629 and 3396656.
|
||||
// RM will not allow CPU mapping support for DMA addrs (IOVA) based SGTs.
|
||||
//
|
||||
/* For DMA addrs (IOVA) based SGT */
|
||||
if (!sg_page(import_sgt->sgl) ||
|
||||
NV_IS_SOC_DISPLAY_DEVICE(nv))
|
||||
{
|
||||
/*
|
||||
* It is possible for dma_map_sg() to merge scatterlist entries, so
|
||||
* make sure we account for that here.
|
||||
*/
|
||||
for (sg_addr = sg_dma_address(sg), sg_len = sg_dma_len(sg), sg_off = 0;
|
||||
(sg_off < sg_len) && (j < page_count);
|
||||
sg_off += PAGE_SIZE, j++)
|
||||
/* Populate phys addrs with DMA addrs from SGT */
|
||||
for_each_sg(import_sgt->sgl, sg, import_sgt->nents, i)
|
||||
{
|
||||
phys_addr[j] = sg_addr + sg_off;
|
||||
/*
|
||||
* It is possible for dma_map_sg() to merge scatterlist entries, so
|
||||
* make sure we account for that here.
|
||||
*/
|
||||
for (sg_addr = sg_dma_address(sg), sg_len = sg_dma_len(sg), sg_off = 0;
|
||||
(sg_off < sg_len) && (j < page_count);
|
||||
sg_off += PAGE_SIZE, j++)
|
||||
{
|
||||
phys_addr[j] = sg_addr + sg_off;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Populate phys addrs from SGT */
|
||||
for_each_sg(import_sgt->sgl, sg, import_sgt->orig_nents, i)
|
||||
{
|
||||
if (WARN_ON(sg->offset != 0))
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
"NVRM: RM does not support the sg->offset != 0 use case yet.\n");
nvos_free_alloc(at);
return NV_ERR_NOT_SUPPORTED;
}

/*
* Store the phys_addr instead of dma_address.
* Use sg_phys() instead of sg_dma_address().
*/
for ((sg_addr = sg_phys(sg), sg_len = sg->length, sg_off = 0);
((sg_off < sg_len) && (j < page_count));
(sg_off += PAGE_SIZE, j++))
{
phys_addr[j] = sg_addr + sg_off;
at->page_table[j].phys_addr = phys_addr[j];
}
}
WARN_ON(j != page_count);

// Setting memory flags to io and contiguous.
at->flags.peer_io = is_peer_mmio;
if (import_sgt->orig_nents == 1)
{
at->flags.contig = NV_TRUE;
}
}

/*
* Setting memory flags to cacheable and discontiguous.
* Setting memory flags to cacheable.
*/
at->cache_type = cache_type;

@@ -3561,6 +3558,8 @@ NV_STATUS NV_API_CALL nv_register_sgt(

at->order = get_order(at->num_pages * PAGE_SIZE);

NV_ATOMIC_INC(at->usage_count);

*priv_data = at;

NV_PRINT_AT(NV_DBG_MEMINFO, at);
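
The new nv_register_sgt() walks the scatter-gather table in two modes: the DMA/IOVA path iterates the mapped entries (nents) with sg_dma_address()/sg_dma_len(), while the CPU-physical path iterates the original entries (orig_nents) with sg_phys(). In both cases one address per PAGE_SIZE chunk is recorded, because dma_map_sg() may merge adjacent pages into a single entry. A minimal sketch of the DMA-side walk, assuming a mapped sg_table; the function name and parameters are illustrative, not driver API:

    #include <linux/scatterlist.h>

    static unsigned int sgt_collect_dma_pages(struct sg_table *sgt,
                                              u64 *addrs,
                                              unsigned int max_pages)
    {
        struct scatterlist *sg;
        unsigned int i, j = 0;

        /* nents is the mapped count; one entry may span many pages. */
        for_each_sg(sgt->sgl, sg, sgt->nents, i)
        {
            u64 sg_addr = sg_dma_address(sg);
            unsigned int sg_len = sg_dma_len(sg), sg_off;

            for (sg_off = 0; (sg_off < sg_len) && (j < max_pages);
                 sg_off += PAGE_SIZE, j++)
            {
                addrs[j] = sg_addr + sg_off;
            }
        }
        return j; /* page-sized chunks recorded */
    }

The NV_ATOMIC_INC(at->usage_count) added at the end of the function pairs with the NV_ATOMIC_DEC_AND_TEST() now guarding nvos_free_alloc() in nv_unregister_sgt() below, turning the registration into a refcounted object.
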
@@ -3590,7 +3589,10 @@ void NV_API_CALL nv_unregister_sgt(
*import_priv = at->import_priv;
}

nvos_free_alloc(at);
if (NV_ATOMIC_DEC_AND_TEST(at->usage_count))
{
nvos_free_alloc(at);
}
}

void NV_API_CALL nv_unregister_phys_pages(
@@ -3638,7 +3640,7 @@ NV_STATUS NV_API_CALL nv_get_phys_pages(
page_count = NV_MIN(*pNumPages, at->num_pages);

for (i = 0; i < page_count; i++) {
pages[i] = NV_GET_PAGE_STRUCT(at->page_table[i]->phys_addr);
pages[i] = NV_GET_PAGE_STRUCT(at->page_table[i].phys_addr);
}

*pNumPages = page_count;
@@ -3679,8 +3681,8 @@ void* NV_API_CALL nv_alloc_kernel_mapping(
// those pages to obtain virtual address.
//
isUserAllocatedMem = at->flags.user &&
!at->page_table[pageIndex]->virt_addr &&
at->page_table[pageIndex]->phys_addr;
!at->page_table[pageIndex].virt_addr &&
at->page_table[pageIndex].phys_addr;

//
// User memory may NOT have kernel VA. So check this and fallback to else
@@ -3688,10 +3690,11 @@ void* NV_API_CALL nv_alloc_kernel_mapping(
//
if (((size + pageOffset) <= PAGE_SIZE) &&
!at->flags.guest && !at->flags.aliased &&
!isUserAllocatedMem && !at->flags.physical)
!isUserAllocatedMem && !at->flags.physical &&
!at->import_sgt)
{
*pPrivate = NULL;
return (void *)(at->page_table[pageIndex]->virt_addr + pageOffset);
return (void *)(at->page_table[pageIndex].virt_addr + pageOffset);
}
else
{
@@ -3706,7 +3709,7 @@ void* NV_API_CALL nv_alloc_kernel_mapping(
}
else
{
NV_KMALLOC(pages, sizeof(struct page *) * page_count);
pages = kvmalloc_array(page_count, sizeof(struct page *), NV_GFP_KERNEL);
if (pages == NULL)
{
nv_printf(NV_DBG_ERRORS,
@@ -3715,11 +3718,11 @@ void* NV_API_CALL nv_alloc_kernel_mapping(
}

for (j = 0; j < page_count; j++)
pages[j] = NV_GET_PAGE_STRUCT(at->page_table[pageIndex+j]->phys_addr);
pages[j] = NV_GET_PAGE_STRUCT(at->page_table[pageIndex+j].phys_addr);

virt_addr = nv_vm_map_pages(pages, page_count,
at->cache_type == NV_MEMORY_CACHED, at->flags.unencrypted);
NV_KFREE(pages, sizeof(struct page *) * page_count);
kvfree(pages);
}

if (virt_addr == 0)
@@ -3735,7 +3738,7 @@ void* NV_API_CALL nv_alloc_kernel_mapping(
return NULL;
}

NV_STATUS NV_API_CALL nv_free_kernel_mapping(
void NV_API_CALL nv_free_kernel_mapping(
nv_state_t *nv,
void *pAllocPrivate,
void *address,
@@ -3757,8 +3760,6 @@ NV_STATUS NV_API_CALL nv_free_kernel_mapping(
{
nv_vm_unmap_pages(virt_addr, page_count);
}

return NV_OK;
}

NV_STATUS NV_API_CALL nv_alloc_pages(
@@ -3852,12 +3853,11 @@ NV_STATUS NV_API_CALL nv_alloc_pages(
*/
if ((nv == NULL) || will_remap)
{
pte_array[i] = at->page_table[i]->phys_addr;
pte_array[i] = at->page_table[i].phys_addr;
}
else
{
pte_array[i] = nv_phys_to_dma(dev,
at->page_table[i]->phys_addr);
pte_array[i] = nv_phys_to_dma(dev, at->page_table[i].phys_addr);
}
}

@@ -3901,7 +3901,7 @@ NV_STATUS NV_API_CALL nv_free_pages(
if (!NV_ATOMIC_DEC_AND_TEST(at->usage_count))
return NV_OK;

if (!at->flags.guest)
if (!at->flags.guest && !at->import_sgt)
{
if (at->flags.contig)
nv_free_contig_pages(at);
@@ -4249,13 +4249,20 @@ void NV_API_CALL nv_flush_snapshot_timer(void)
}

static int __init
nvos_count_devices(void)
nvos_count_devices(int *num_pci_devices, int *num_platform_devices)
{
int count;
int nplatform = 0;
int npci = nv_pci_count_devices();

count = nv_pci_count_devices();
nplatform = nv_platform_count_devices();

return count;
if (num_pci_devices != NULL)
*num_pci_devices = npci;

if (num_platform_devices != NULL)
*num_platform_devices = nplatform;

return npci + nplatform;
}

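The reworked nvos_count_devices() tolerates NULL out-parameters, so call sites that only need the grand total need not declare locals. A hedged usage sketch (the call sites are illustrative):

    /* Total only; the per-bus breakdown is not needed here. */
    int total = nvos_count_devices(NULL, NULL);

    /* Total plus breakdown, as nvidia_init_module() does above. */
    int num_pci = 0, num_platform = 0;
    total = nvos_count_devices(&num_pci, &num_platform);
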
#if NVCPU_IS_AARCH64
@@ -4460,8 +4467,37 @@ nvidia_suspend(
}
nv = NV_STATE_PTR(nvl);

#if defined(NV_PM_RUNTIME_AVAILABLE)
/* Handle GenPD suspend sequence for Tegra PCI iGPU */
if (dev_is_pci(dev) && nv->is_tegra_pci_igpu_rg_enabled == NV_TRUE)
{
/* Turn on the GPU power before saving PCI configuration */
pm_runtime_forbid(dev);

/*
* If a PCI device is attached to a GenPD power domain, the
* resume_early callback in the PCI framework will not be
* executed during static resume, so the PCI configuration
* cannot be properly restored.
*
* Clear the power domain of the PCI GPU before static suspend
* to make sure its PCI configuration can be properly restored
* during static resume.
*/
nv_printf(NV_DBG_INFO,
"NVRM: set GPU pm_domain to NULL before suspend\n");
dev_pm_domain_set(dev, NULL);
}
#endif

down(&nvl->ldata_lock);

if (!nv->is_pm_supported)
{
status = NV_ERR_NOT_SUPPORTED;
goto done;
}

if (((nv->flags & NV_FLAG_OPEN) == 0) &&
((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) == 0))
{
@@ -4516,6 +4552,11 @@ nvidia_resume(
{
NV_STATUS status = NV_OK;
struct pci_dev *pci_dev;
#if defined(NV_PM_RUNTIME_AVAILABLE)
struct pci_bus *bus;
struct pci_host_bridge *bridge;
struct device *ctrl;
#endif
nv_linux_state_t *nvl;
nv_state_t *nv;

@@ -4530,6 +4571,31 @@ nvidia_resume(
}
nv = NV_STATE_PTR(nvl);

#if defined(NV_PM_RUNTIME_AVAILABLE)
/* Handle GenPD resume sequence for Tegra PCI iGPU */
if (dev_is_pci(dev) && nv->is_tegra_pci_igpu_rg_enabled == NV_TRUE)
{
// Get PCI controller device
bus = pci_dev->bus;
while (bus->parent)
bus = bus->parent;

bridge = to_pci_host_bridge(bus->bridge);
ctrl = bridge->dev.parent;

/*
* Attach the GPU power domain back. This driver cannot directly use
* dev_pm_domain_set() to recover the pm_domain because a kernel
* warning is triggered if the calling driver is already bound.
*/
nv_printf(NV_DBG_INFO,
"NVRM: restore GPU pm_domain after suspend\n");
dev->pm_domain = ctrl->pm_domain;

pm_runtime_allow(dev);
}
#endif

down(&nvl->ldata_lock);

if ((nv->flags & NV_FLAG_SUSPENDED) == 0)
@@ -4619,6 +4685,29 @@ nv_suspend_devices(
nv_linux_state_t *nvl;
NvBool resume_devices = NV_FALSE;
NV_STATUS status = NV_OK;
#if defined(NV_PM_RUNTIME_AVAILABLE)
nv_state_t *nv;
struct device *dev;

LOCK_NV_LINUX_DEVICES();

/* For Tegra PCI iGPU, forbid the GPU suspend via procfs */
for (nvl = nv_linux_devices; nvl != NULL && status == NV_OK; nvl = nvl->next)
{
nv = NV_STATE_PTR(nvl);
dev = nvl->dev;
if (dev_is_pci(dev) && nv->is_tegra_pci_igpu_rg_enabled == NV_TRUE)
{
nv_printf(NV_DBG_INFO,
"NVRM: GPU suspend through procfs is forbidden with Tegra iGPU\n");
UNLOCK_NV_LINUX_DEVICES();

return NV_ERR_NOT_SUPPORTED;
}
}

UNLOCK_NV_LINUX_DEVICES();
#endif

nvidia_modeset_suspend(0);

@@ -4860,14 +4949,73 @@ int nv_pmops_runtime_suspend(
struct device *dev
)
{
return nvidia_transition_dynamic_power(dev, NV_TRUE);
#if defined(CONFIG_PM_DEVFREQ)
struct pci_dev *pci_dev = to_pci_dev(dev);
nv_linux_state_t *nvl = pci_get_drvdata(pci_dev);
#endif
int err = 0;

err = nvidia_transition_dynamic_power(dev, NV_TRUE);
if (err)
{
return err;
}

#if defined(CONFIG_PM_DEVFREQ)
if (nvl->devfreq_suspend != NULL)
{
err = nvl->devfreq_suspend(dev);
if (err)
{
goto nv_pmops_runtime_suspend_exit;
}
}

return err;

nv_pmops_runtime_suspend_exit:
nvidia_transition_dynamic_power(dev, NV_FALSE);
#endif
return err;
}

int nv_pmops_runtime_resume(
struct device *dev
)
{
return nvidia_transition_dynamic_power(dev, NV_FALSE);
#if defined(CONFIG_PM_DEVFREQ)
struct pci_dev *pci_dev = to_pci_dev(dev);
nv_linux_state_t *nvl = pci_get_drvdata(pci_dev);
#endif
int err;

#if defined(CONFIG_PM_DEVFREQ)
if (nvl->devfreq_resume != NULL)
{
err = nvl->devfreq_resume(dev);
if (err)
{
goto nv_pmops_runtime_resume_exit;
}
}
#endif

err = nvidia_transition_dynamic_power(dev, NV_FALSE);
#if defined(CONFIG_PM_DEVFREQ)
if (err)
{
goto nv_pmops_runtime_resume_exit;
}

return err;

nv_pmops_runtime_resume_exit:
if (nvl->devfreq_suspend != NULL)
{
nvl->devfreq_suspend(dev);
}
#endif
return err;
}
#endif /* defined(CONFIG_PM) */

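The two runtime-PM callbacks above are deliberately symmetric: suspend performs the power transition first and then quiesces devfreq, while resume restores devfreq first and then powers back up, and each path undoes its completed first step if the second step fails. A minimal self-contained model of the suspend ordering (all helpers are illustrative stand-ins, not driver API; resume is the mirror image):

    #include <stdio.h>

    /* Stand-ins for nvidia_transition_dynamic_power() and the devfreq
     * hook; return 0 on success, nonzero on failure. */
    static int power_set(int off) { printf("power %s\n", off ? "off" : "on"); return 0; }
    static int devfreq_quiesce(void) { printf("devfreq suspend\n"); return 0; }

    static int runtime_suspend_model(void)
    {
        int err = power_set(1);
        if (err)
            return err;
        err = devfreq_quiesce();
        if (err)
            power_set(0); /* roll back so the device stays usable */
        return err;
    }

    int main(void) { return runtime_suspend_model(); }
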
@@ -5234,11 +5382,7 @@ NV_STATUS NV_API_CALL nv_indicate_idle(
mutex_unlock(&of->mutex);
}
#else
#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
kernel_read(file, &buf, 1, &f_pos);
#else
kernel_read(file, f_pos, &buf, 1);
#endif
#endif

return NV_OK;
@@ -5676,6 +5820,117 @@ NvBool NV_API_CALL nv_s2idle_pm_configured(void)
return (memcmp(buf, "[s2idle]", 8) == 0);
}

NvBool NV_API_CALL nv_pci_tegra_register_power_domain
(
nv_state_t *nv,
NvBool attach
)
{
#if defined(NV_PM_RUNTIME_AVAILABLE)
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct pci_dev *pci_dev = nvl->pci_dev;
struct device_node *node = pci_dev->dev.of_node;

if (attach)
{
if (!node)
{
nv_printf(NV_DBG_WARNINGS, "NVRM: No dt node associated with this device\n");
return NV_FALSE;
}
if (!of_find_property(node, "power-domains", NULL))
{
nv_printf(NV_DBG_WARNINGS, "NVRM: No power-domains property defined in the dt node\n");
return NV_FALSE;
}

nv_printf(NV_DBG_INFO, "NVRM: Attaching device to GPU power domain\n");
return (dev_pm_domain_attach(&pci_dev->dev, true) == 0);
}
else
{
nv_printf(NV_DBG_INFO, "NVRM: Detaching device from GPU power domain\n");
dev_pm_domain_detach(&pci_dev->dev, true);
}
#endif
return NV_TRUE;
}

NvBool NV_API_CALL nv_pci_tegra_pm_init
(
nv_state_t *nv
)
{
#if defined(NV_PM_RUNTIME_AVAILABLE)
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct pci_dev *pci_dev = nvl->pci_dev;
struct pci_bus *bus = pci_dev->bus;
struct pci_host_bridge *bridge;
struct device *ctrl;

if (pci_dev->dev.pm_domain != NULL ||
nv_pci_tegra_register_power_domain(nv, NV_TRUE) == NV_FALSE)
{
return NV_FALSE;
}

// Enable runtime PM for PCIe controller of GPU to avoid
// PCIe enumeration failure with tegra iGPU
while (bus->parent)
bus = bus->parent;

bridge = to_pci_host_bridge(bus->bridge);
ctrl = bridge->dev.parent;

nv_printf(NV_DBG_INFO, "NVRM: Enable runtime PM for PCIe Controller\n");
pm_runtime_enable(ctrl);

// Use autosuspend for GPU with idleness threshold 500 ms
pm_runtime_set_autosuspend_delay(&pci_dev->dev, 500);
pm_runtime_use_autosuspend(&pci_dev->dev);
#endif
return NV_TRUE;
}

void NV_API_CALL nv_pci_tegra_pm_deinit
(
nv_state_t *nv
)
{
#if defined(NV_PM_RUNTIME_AVAILABLE)
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct pci_dev *pci_dev = nvl->pci_dev;
struct pci_bus *bus = pci_dev->bus;
struct pci_host_bridge *bridge;
struct device *ctrl;

if (pci_dev->dev.pm_domain == NULL)
{
return;
}

// Stop autosuspend for GPU
pm_runtime_dont_use_autosuspend(&pci_dev->dev);

/*
* Enable runtime PM for PCIe controller of GPU. Because PCIe controller
* is also registered to GPU power domain on L4T, runtime PM of PCIe
* controller needs to be enabled so that it won't hold GPU power domain
* on when GPU is idle and being runtime suspended.
*/
while (bus->parent)
bus = bus->parent;

bridge = to_pci_host_bridge(bus->bridge);
ctrl = bridge->dev.parent;

nv_printf(NV_DBG_INFO, "NVRM: Disable runtime PM for PCIe Controller\n");
pm_runtime_disable(ctrl);

nv_pci_tegra_register_power_domain(nv, NV_FALSE);
#endif
}

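The same root-bus walk appears three times in this commit (nvidia_resume(), nv_pci_tegra_pm_init(), nv_pci_tegra_pm_deinit()): climb bus->parent to the root bus, then take the host bridge's parent as the PCIe controller device. It is open-coded because pci_find_host_bridge() is not reliably exported (note the is_export_symbol_present_pci_find_host_bridge conftest below). A sketch of the pattern as a helper; the helper name is illustrative:

    #include <linux/pci.h>

    /* Walk from an endpoint to the device of its PCIe host controller. */
    static struct device *pcie_controller_dev(struct pci_dev *pdev)
    {
        struct pci_bus *bus = pdev->bus;

        while (bus->parent) /* climb to the root bus */
            bus = bus->parent;

        /* bus->bridge is the host bridge's struct device; its parent
         * is the PCIe controller's own device. */
        return to_pci_host_bridge(bus->bridge)->dev.parent;
    }
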
/*
* Function to query system chassis info, to figure out if the platform is
* a Laptop or Notebook.
@@ -5823,9 +6078,7 @@ NV_STATUS NV_API_CALL nv_get_egm_info(
NvS32 *egm_node_id
)
{
#if defined(NV_DEVICE_PROPERTY_READ_U64_PRESENT) && \
defined(CONFIG_ACPI_NUMA) && \
NV_IS_EXPORT_SYMBOL_PRESENT_pxm_to_node
#if defined(CONFIG_ACPI_NUMA) && NV_IS_EXPORT_SYMBOL_PRESENT_pxm_to_node
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NvU64 pa, sz, pxm;

@@ -5870,7 +6123,7 @@ NV_STATUS NV_API_CALL nv_get_egm_info(
return NV_OK;

failed:
#endif // NV_DEVICE_PROPERTY_READ_U64_PRESENT
#endif // defined(CONFIG_ACPI_NUMA) && NV_IS_EXPORT_SYMBOL_PRESENT_pxm_to_node

NV_DEV_PRINTF(NV_DBG_INFO, nv, "Cannot get EGM info\n");
return NV_ERR_NOT_SUPPORTED;

@@ -35,6 +35,7 @@
#define _NV_GPU_OPS_H_
#include "nvgputypes.h"
#include "nv_uvm_types.h"
#include "nv_uvm_user_types.h"

typedef struct gpuSession *gpuSessionHandle;
typedef struct gpuDevice *gpuDeviceHandle;

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2013-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -1154,7 +1154,7 @@ void nv_uvm_notify_start_device(const NvU8 *pUuid)
NvProcessorUuid uvmUuid;
struct UvmOpsUvmEvents *events;

memcpy(uvmUuid.uuid, pUuid, UVM_UUID_LEN);
memcpy(uvmUuid.uuid, pUuid, NV_UUID_LEN);

// Synchronize callbacks with unregistration
down(&g_pNvUvmEventsLock);
@@ -1175,7 +1175,7 @@ void nv_uvm_notify_stop_device(const NvU8 *pUuid)
NvProcessorUuid uvmUuid;
struct UvmOpsUvmEvents *events;

memcpy(uvmUuid.uuid, pUuid, UVM_UUID_LEN);
memcpy(uvmUuid.uuid, pUuid, NV_UUID_LEN);

// Synchronize callbacks with unregistration
down(&g_pNvUvmEventsLock);
@@ -1246,7 +1246,7 @@ NV_STATUS nv_uvm_drain_P2P(const NvU8 *uuid)
struct UvmOpsUvmEvents *events;
NV_STATUS ret = NV_ERR_NOT_SUPPORTED;

memcpy(uvmUuid.uuid, uuid, UVM_UUID_LEN);
memcpy(uvmUuid.uuid, uuid, NV_UUID_LEN);

// Synchronize callbacks with unregistration
down(&g_pNvUvmEventsLock);
@@ -1270,7 +1270,7 @@ NV_STATUS nv_uvm_resume_P2P(const NvU8 *uuid)
struct UvmOpsUvmEvents *events;
NV_STATUS ret = NV_ERR_NOT_SUPPORTED;

memcpy(uvmUuid.uuid, uuid, UVM_UUID_LEN);
memcpy(uvmUuid.uuid, uuid, NV_UUID_LEN);

// Synchronize callbacks with unregistration
down(&g_pNvUvmEventsLock);

@@ -119,38 +119,16 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_wc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_driver_hardened
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_driver_hardened_wc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache_shared
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_get_domain_bus_and_slot
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_num_physpages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data
NV_CONFTEST_FUNCTION_COMPILE_TESTS += xen_ioemu_inject_msi
NV_CONFTEST_FUNCTION_COMPILE_TESTS += phys_to_dma
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_dma_ops
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_attr_macros
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_page_attrs
NV_CONFTEST_FUNCTION_COMPILE_TESTS += write_cr4
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_find_node_by_phandle
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_node_to_nid
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_ibm_chip_id
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_stop_and_remove_bus_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_rebar_get_possible_sizes
NV_CONFTEST_FUNCTION_COMPILE_TESTS += wait_for_random_bytes
NV_CONFTEST_FUNCTION_COMPILE_TESTS += register_cpu_notifier
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cpuhp_setup_state
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_resource
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_backlight_device_by_name
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_msix_range
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_read_has_pointer_pos_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_write_has_pointer_pos_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_direct_map_resource
NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_get_platform
NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_bpmp_send_receive
NV_CONFTEST_FUNCTION_COMPILE_TESTS += flush_cache_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += jiffies_to_timespec
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += full_name_hash
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_atomic_ops_to_root
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_platform_has
@@ -159,35 +137,17 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += hv_get_isolation_type
NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter
NV_CONFTEST_FUNCTION_COMPILE_TESTS += follow_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ptep_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed
NV_CONFTEST_FUNCTION_COMPILE_TESTS += device_property_read_u64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_of_platform_populate
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_dma_configure
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_count_elems_of_size
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_read_variable_u8_array
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_read_variable_u32_array
NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_new_client_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_unregister_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_named_gpio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_gpio_request_one
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_direction_input
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_direction_output
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_get_value
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_set_value
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_to_irq
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_of_icc_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_put
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_set_bw
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_export_args
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap_atomic
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map_atomic
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_attachment_has_peer2peer
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_set_mask_and_coherent
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_task_ioprio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mdev_set_iommu_device
@@ -195,30 +155,20 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += offline_and_remove_memory
NV_CONFTEST_FUNCTION_COMPILE_TESTS += stack_trace
NV_CONFTEST_FUNCTION_COMPILE_TESTS += crypto_tfm_ctx_aligned
NV_CONFTEST_FUNCTION_COMPILE_TESTS += assign_str
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioasid_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mm_pasid_drop
NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_sva_bind_device_has_drvdata_arg

NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_map_sg_attrs
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_dma_ops
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___close_fd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_close_fd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_unused_fd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_unused_fd_flags
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_get_default_device
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_syncpt_unit_interface_get_byte_offset
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_syncpt_unit_interface_get_aperture
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_register_ipc_client
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_unregister_ipc_client
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_client_ipc_send_recv
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_dram_clk_to_mc_clk
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_dram_num_channels
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dram_types
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pxm_to_node
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_screen_info
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_screen_info
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_i2c_bus_status
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_fuse_control_read
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_get_platform
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pci_find_host_bridge
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_send_cmd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_set_init_cb
@@ -230,6 +180,7 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto
NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto_akcipher_verify
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pte
NV_CONFTEST_SYMBOL_COMPILE_TESTS += follow_pte_arg_vma
NV_CONFTEST_SYMBOL_COMPILE_TESTS += dma_buf_ops_attach_has_arg_dev
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pfnmap_start
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_pci_ats_supported
NV_CONFTEST_SYMBOL_COMPILE_TESTS += ecc_digits_from_bytes
@@ -238,38 +189,31 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_set_memory_decrypted
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl___platform_driver_register
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___platform_driver_register
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_hrtimer_setup
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_mutex_destroy
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl___vma_start_write
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_iommu_dev_enable_feature
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_iommu_dev_disable_feature

NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += noncoherent_swiotlb_dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_insert_pfn_prot
NV_CONFTEST_TYPE_COMPILE_TESTS += vmf_insert_pfn_prot
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += kmem_cache_has_kobj_remove_work
NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink
NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64
NV_CONFTEST_TYPE_COMPILE_TESTS += vmalloc_has_pgprot_t_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_channel_state
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_dev_has_ats_enabled
NV_CONFTEST_TYPE_COMPILE_TESTS += remove_memory_has_nid_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += add_memory_driver_managed_has_mhp_flags_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += num_registered_fb
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_driver_has_driver_managed_dma
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += memory_failure_has_trapno_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += memory_failure_queue_has_trapno_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += foll_longterm_present
NV_CONFTEST_TYPE_COMPILE_TESTS += bus_type_has_iommu_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += of_property_for_each_u32_has_internal_args
NV_CONFTEST_TYPE_COMPILE_TESTS += platform_driver_struct_remove_returns_void
NV_CONFTEST_TYPE_COMPILE_TESTS += class_create_has_no_owner_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += class_devnode_has_const_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += devfreq_has_freq_table
NV_CONFTEST_TYPE_COMPILE_TESTS += has_enum_pidtype_tgid

NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present
@@ -285,7 +229,6 @@ NV_CONFTEST_GENERIC_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_GENERIC_COMPILE_TESTS += pci_class_multimedia_hd_audio
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += vfio_pci_core_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += mdev_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += cmd_uphy_display_port_init
NV_CONFTEST_GENERIC_COMPILE_TESTS += cmd_uphy_display_port_off
NV_CONFTEST_GENERIC_COMPILE_TESTS += memory_failure_mf_sw_simulated_defined

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,3 +39,8 @@ bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
bool libspdm_check_crypto_backend(void);

bool libspdm_encode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen);
bool libspdm_decode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen);
bool libspdm_pem_to_der(const uint8_t *pem_cert, uint8_t *der_cert, size_t pem_size, size_t *p_der_size);
bool libspdm_der_to_pem(const uint8_t *der_cert, uint8_t *pem_cert, size_t der_size, size_t *p_pem_size);
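
The four declarations added above form a small encoding layer around certificates. A hedged usage sketch of libspdm_pem_to_der(), assuming (in line with neighbouring libspdm APIs) that the size parameter is in/out — buffer capacity on input, bytes written on output; the buffer size and names are illustrative:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    bool libspdm_pem_to_der(const uint8_t *pem_cert, uint8_t *der_cert,
                            size_t pem_size, size_t *p_der_size);

    static bool convert_cert(const uint8_t *pem, size_t pem_size)
    {
        uint8_t der[4096];             /* illustrative upper bound */
        size_t der_size = sizeof(der); /* in: capacity, out: length */

        if (!libspdm_pem_to_der(pem, der, pem_size, &der_size))
            return false;
        /* der[0..der_size) now holds the DER-encoded certificate. */
        return true;
    }
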
@@ -25,6 +25,7 @@

#include "os-interface.h"
#include "nv-linux.h"
#include <linux/iommu.h>
#include "nv-caps-imex.h"

#include "nv-platform.h"
@@ -34,6 +35,7 @@
#include <linux/mmzone.h>
#include <linux/numa.h>
#include <linux/cpuset.h>
#include <linux/sys_soc.h>

#include <linux/pid.h>
#include <linux/pid_namespace.h>
@@ -51,9 +53,11 @@ extern nv_linux_state_t nv_ctl_device;

extern nv_kthread_q_t nv_kthread_q;

NvU32 os_page_size = PAGE_SIZE;
NvU64 os_page_mask = NV_PAGE_MASK;
NvU8 os_page_shift = PAGE_SHIFT;
NvU64 os_page_size = PAGE_SIZE;
NvU64 os_max_page_size = PAGE_SIZE << NV_MAX_PAGE_ORDER;
NvU64 os_page_mask = NV_PAGE_MASK;
NvU8 os_page_shift = PAGE_SHIFT;

NvBool os_cc_enabled = 0;
NvBool os_cc_sev_snp_enabled = 0;
NvBool os_cc_snp_vtom_enabled = 0;
@@ -474,11 +478,6 @@ void *NV_API_CALL os_mem_copy(
* When performing memcpy for memory mapped as device, memcpy_[to/from]io
* must be used. WAR to check the source and destination to determine the
* correct memcpy_io to use.
*
* This WAR is limited to just aarch64 for now because the address range used
* to map ioremap and vmalloc is different on ppc64le, and is_vmalloc_addr()
* does not correctly handle this. is_ioremap_addr() is needed instead. This
* will have to be addressed when reorganizing RM to use the new memset model.
*/
if (is_vmalloc_addr(dst) && !is_vmalloc_addr(src))
{
@@ -548,11 +547,6 @@ void* NV_API_CALL os_mem_set(
*
* WAR to check the destination to determine if the memory is of type Device
* or Normal, and use the correct memset.
*
* This WAR is limited to just aarch64 for now because the address range used
* to map ioremap and vmalloc is different on ppc64le, and is_vmalloc_addr()
* does not correctly handle this. is_ioremap_addr() is needed instead. This
* will have to be addressed when reorganizing RM to use the new memset model.
*/
if (is_vmalloc_addr(dst))
{
@@ -676,11 +670,11 @@ void NV_API_CALL os_free_mem(void *address)

/*****************************************************************************
*
* Name: osGetCurrentTime
* Name: osGetSystemTime
*
*****************************************************************************/

NV_STATUS NV_API_CALL os_get_current_time(
NV_STATUS NV_API_CALL os_get_system_time(
NvU32 *seconds,
NvU32 *useconds
)
@@ -698,16 +692,14 @@ NV_STATUS NV_API_CALL os_get_current_time(
//
// Get the High resolution tick count of the system uptime
//
NvU64 NV_API_CALL os_get_current_tick_hr(void)
NvU64 NV_API_CALL os_get_monotonic_time_ns_hr(void)
{
struct timespec64 tm;
ktime_get_raw_ts64(&tm);
return (NvU64) timespec64_to_ns(&tm);
}

#if BITS_PER_LONG >= 64

NvU64 NV_API_CALL os_get_current_tick(void)
NvU64 NV_API_CALL os_get_monotonic_time_ns(void)
{
#if defined(NV_JIFFIES_TO_TIMESPEC_PRESENT)
struct timespec ts;
@@ -720,47 +712,13 @@ NvU64 NV_API_CALL os_get_current_tick(void)
#endif
}

NvU64 NV_API_CALL os_get_tick_resolution(void)
NvU64 NV_API_CALL os_get_monotonic_tick_resolution_ns(void)
{
return (NvU64)jiffies_to_usecs(1) * NSEC_PER_USEC;
}

#else

NvU64 NV_API_CALL os_get_current_tick(void)
{
/*
* 'jiffies' overflows regularly on 32-bit builds (unsigned long is 4 bytes
* instead of 8 bytes), so it's unwise to build a tick counter on it, since
* the rest of the Resman assumes the 'tick' returned from this function is
* monotonically increasing and never overflows.
*
* Instead, use the previous implementation that we've lived with since the
* beginning, which uses system clock time to calculate the tick. This is
* subject to problems if the system clock time changes dramatically
* (more than a second or so) while the Resman is actively tracking a
* timeout.
*/
NvU32 seconds, useconds;

(void) os_get_current_time(&seconds, &useconds);

return ((NvU64)seconds * NSEC_PER_SEC +
(NvU64)useconds * NSEC_PER_USEC);
}

NvU64 NV_API_CALL os_get_tick_resolution(void)
{
/*
* os_get_current_tick() uses os_get_current_time(), which has
* microsecond resolution.
*/
return 1000ULL;
}

#endif

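The removed 32-bit fallback's rationale is easy to verify: an unsigned 32-bit jiffies counter wraps after 2^32 ticks, which is roughly 49.7 days at HZ=1000 (4294967296 / 1000 / 86400 ≈ 49.71) and roughly 497 days at HZ=100 — far too short for a counter the rest of the Resman assumes never overflows. A trivial check of the arithmetic (plain C, illustrative only):

    #include <stdio.h>

    int main(void)
    {
        /* Days until a 32-bit jiffies counter wraps, per HZ value. */
        const double ticks = 4294967296.0; /* 2^32 */
        printf("HZ=100:  %.1f days\n", ticks / 100.0 / 86400.0);  /* ~497.1 */
        printf("HZ=1000: %.1f days\n", ticks / 1000.0 / 86400.0); /* ~49.7 */
        return 0;
    }
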
//---------------------------------------------------------------------------

//
// Misc services.
//
@@ -806,6 +764,50 @@ void NV_API_CALL os_get_current_process_name(char *buf, NvU32 len)
task_unlock(current);
}

NV_STATUS NV_API_CALL os_iommu_sva_bind(void *arg, void **handle, NvU32 *pasid)
{
nv_state_t *nv = arg;
#if defined(CONFIG_IOMMU_SVA) && \
(defined(NV_IOASID_GET_PRESENT) || defined(NV_MM_PASID_DROP_PRESENT))
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct iommu_sva *sva_handle;

if (pasid == NULL || handle == NULL)
return NV_ERR_INVALID_ARGUMENT;

*pasid = 0;
*handle = NULL;

if (nv->ats_support && current && current->mm)
{
#if defined(NV_IOMMU_SVA_BIND_DEVICE_HAS_DRVDATA_ARG)
sva_handle = iommu_sva_bind_device(nvl->dev, current->mm, NULL);
#else
sva_handle = iommu_sva_bind_device(nvl->dev, current->mm);
#endif
if (!IS_ERR(sva_handle))
{
*pasid = iommu_sva_get_pasid(sva_handle);
*handle = sva_handle;
NV_DEV_PRINTF(NV_DBG_INFO, nv, "PASID: %u\n", *pasid);

return NV_OK;
}
}
#endif
NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "IOMMU SVA bind failed\n");

return NV_ERR_INVALID_STATE;
}

void NV_API_CALL os_iommu_sva_unbind(void *handle)
{
#if defined(CONFIG_IOMMU_SVA) && \
(defined(NV_IOASID_GET_PRESENT) || defined(NV_MM_PASID_DROP_PRESENT))
iommu_sva_unbind_device(handle);
#endif
}

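os_iommu_sva_bind() wraps the kernel's shared virtual addressing API: iommu_sva_bind_device() ties the calling process's mm to the GPU behind the IOMMU, and iommu_sva_get_pasid() returns the PASID that the device must present in its translated requests. A hedged caller-side sketch using only the two wrappers above (the surrounding code is illustrative):

    /* Bind the current process's address space to the GPU, keep the
     * opaque handle for teardown, and hand the PASID to HW setup. */
    void *sva_handle = NULL;
    NvU32 pasid = 0;

    if (os_iommu_sva_bind(nv, &sva_handle, &pasid) == NV_OK)
    {
        /* ... program the PASID into the GPU's ATS/SVA context ... */
        os_iommu_sva_unbind(sva_handle);
    }
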
NV_STATUS NV_API_CALL os_get_current_thread(NvU64 *threadId)
{
if (in_interrupt())
@@ -1087,8 +1089,6 @@ void NV_API_CALL os_flush_cpu_write_combine_buffer(void)
{
#if defined(NVCPU_X86_64)
asm volatile("sfence" ::: "memory");
#elif defined(NVCPU_PPC64LE)
__asm__ __volatile__ ("sync" : : : "memory");
#elif defined(NVCPU_AARCH64)
asm volatile("dsb st" : : : "memory");
#else
@@ -1247,8 +1247,6 @@ void NV_API_CALL os_dbg_breakpoint(void)
__asm__ __volatile__ (".word %c0" :: "i" (KGDB_COMPILED_BREAK));
#elif defined(NVCPU_AARCH64)
# warning "Need to implement os_dbg_breakpoint() for aarch64"
#elif defined(NVCPU_PPC64LE)
__asm__ __volatile__ ("trap");
#endif // NVCPU_*
#elif defined(CONFIG_KDB)
KDB_ENTER();
@@ -1515,7 +1513,7 @@ static NV_STATUS os_get_smbios_header_legacy(NvU64 *pSmbsAddr)
}

// This function is needed only if "efi" is enabled.
#if (defined(NV_LINUX_EFI_H_PRESENT) && defined(CONFIG_EFI))
#if defined(CONFIG_EFI)
static NV_STATUS os_verify_smbios_header_uefi(NvU64 smbsAddr)
{
NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND;
@@ -1558,8 +1556,7 @@ static NV_STATUS os_get_smbios_header_uefi(NvU64 *pSmbsAddr)
{
NV_STATUS status = NV_ERR_OPERATING_SYSTEM;

// Make sure that efi.h is present before using "struct efi".
#if (defined(NV_LINUX_EFI_H_PRESENT) && defined(CONFIG_EFI))
#if defined(CONFIG_EFI)

// Make sure that efi.h has SMBIOS3_TABLE_GUID present.
#if defined(SMBIOS3_TABLE_GUID)
@@ -1626,9 +1623,7 @@ NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi

*pRsdpAddr = 0;

// Make sure that efi.h is present before using "struct efi".
#if (defined(NV_LINUX_EFI_H_PRESENT) && defined(CONFIG_EFI))

#if defined(CONFIG_EFI)
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
{
*pRsdpAddr = efi.acpi20;
@@ -1914,11 +1909,7 @@ NV_STATUS NV_API_CALL os_write_file
int num_retries = NV_MAX_NUM_FILE_IO_RETRIES;

retry:
#if defined(NV_KERNEL_WRITE_HAS_POINTER_POS_ARG)
num_written = kernel_write(pFile, pBuffer, size, &f_pos);
#else
num_written = kernel_write(pFile, pBuffer, size, f_pos);
#endif
if (num_written < 0)
{
return NV_ERR_OPERATING_SYSTEM;
@@ -1958,11 +1949,7 @@ NV_STATUS NV_API_CALL os_read_file
int num_retries = NV_MAX_NUM_FILE_IO_RETRIES;

retry:
#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
num_read = kernel_read(pFile, pBuffer, size, &f_pos);
#else
num_read = kernel_read(pFile, f_pos, pBuffer, size);
#endif
if (num_read < 0)
{
return NV_ERR_OPERATING_SYSTEM;
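
Both hunks above drop the pre-4.14 kernel_read()/kernel_write() calling convention (offset passed by value, buffer and size in the old order) and keep only the modern form, where the offset is passed by pointer and advanced by the call. A minimal sketch of the surviving pattern; the wrapper is illustrative:

    #include <linux/fs.h>

    /* Read from the start of a file; the kernel advances the offset. */
    static ssize_t read_from_start(struct file *f, void *buf, size_t len)
    {
        loff_t pos = 0;

        /* Modern signature: kernel_read(file, buf, count, &pos). */
        return kernel_read(f, buf, len, &pos);
    }
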
@@ -2067,10 +2054,8 @@ NV_STATUS NV_API_CALL os_get_random_bytes
NvU16 numBytes
)
{
#if defined NV_WAIT_FOR_RANDOM_BYTES_PRESENT
if (wait_for_random_bytes() < 0)
return NV_ERR_NOT_READY;
#endif

get_random_bytes(bytes, numBytes);
return NV_OK;
@@ -2122,17 +2107,45 @@ void NV_API_CALL os_wake_up
complete_all(&wq->q);
}

NV_STATUS NV_API_CALL os_get_tegra_platform
static bool os_platform_is_fpga(void)
{
const struct soc_device_attribute soc_attrs[] = {
{ .revision = "*FPGA" },
{/* sentinel */}
};

if (soc_device_match(soc_attrs)) {
return true;
}

return false;
}

static bool os_platform_is_vdk(void)
{
const struct soc_device_attribute soc_attrs[] = {
{ .revision = "VDK" },
{/* sentinel */}
};

if (soc_device_match(soc_attrs)) {
return true;
}

return false;
}

NV_STATUS NV_API_CALL os_get_tegra_platform
(
NvU32 *mode
)
{
#if defined(NV_SOC_TEGRA_FUSE_HELPER_H_PRESENT) && NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
if (tegra_platform_is_fpga())
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
if (os_platform_is_fpga())
{
*mode = NV_OS_TEGRA_PLATFORM_FPGA;
}
else if (tegra_platform_is_vdk())
else if (os_platform_is_vdk())
{
*mode = NV_OS_TEGRA_PLATFORM_SIM;
}
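
The new helpers replace the Tegra fuse API with the generic SoC attribute interface from <linux/sys_soc.h>: soc_device_match() takes a sentinel-terminated array and glob-matches the populated fields, which is why the "*FPGA" pattern catches any FPGA revision string. A sketch of the same pattern against a different attribute (the family glob is illustrative):

    #include <linux/sys_soc.h>

    static bool soc_is_example_family(void)
    {
        /* Sentinel-terminated; unset fields act as wildcards. */
        static const struct soc_device_attribute attrs[] = {
            { .family = "Tegra*" }, /* illustrative glob */
            { /* sentinel */ }
        };

        return soc_device_match(attrs) != NULL;
    }
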
@@ -2624,7 +2637,6 @@ NV_STATUS NV_API_CALL os_offline_page_at_address
{
#if defined(CONFIG_MEMORY_FAILURE)
int flags = 0;
int ret;
NvU64 pfn;
struct page *page = NV_GET_PAGE_STRUCT(address);

@@ -2649,22 +2661,18 @@ NV_STATUS NV_API_CALL os_offline_page_at_address
flags |= MF_SW_SIMULATED;
#endif

#ifdef NV_MEMORY_FAILURE_HAS_TRAPNO_ARG
ret = memory_failure(pfn, 0, flags);
#else
ret = memory_failure(pfn, flags);
#endif
nv_printf(NV_DBG_INFO, "NVRM: offlining page at address: 0x%llx pfn: 0x%llx\n",
address, pfn);

if (ret != 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: page offlining failed. address: 0x%llx pfn: 0x%llx ret: %d\n",
address, pfn, ret);
return NV_ERR_OPERATING_SYSTEM;
}
#ifdef NV_MEMORY_FAILURE_QUEUE_HAS_TRAPNO_ARG
memory_failure_queue(pfn, 0, flags);
#else
memory_failure_queue(pfn, flags);
#endif

return NV_OK;
#else // !defined(CONFIG_MEMORY_FAILURE)
nv_printf(NV_DBG_ERRORS, "NVRM: memory_failure() not supported by kernel. page offlining failed. address: 0x%llx\n",
nv_printf(NV_DBG_ERRORS, "NVRM: memory_failure_queue() not supported by kernel. page offlining failed. address: 0x%llx\n",
address);
return NV_ERR_NOT_SUPPORTED;
#endif

@@ -28,7 +28,7 @@

#if defined(NVCPU_FAMILY_X86) && defined(NV_FOLL_LONGTERM_PRESENT) && \
(defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS) || \
defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS))
defined(NV_GET_USER_PAGES_HAS_VMAS_ARG))
#define NV_NUM_PIN_PAGES_PER_ITERATION 0x80000
#endif

@@ -258,7 +258,7 @@ NV_STATUS NV_API_CALL os_lock_user_pages(
}
#if defined(NVCPU_FAMILY_X86) && defined(NV_FOLL_LONGTERM_PRESENT) && \
(defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS) || \
defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS))
defined(NV_GET_USER_PAGES_HAS_VMAS_ARG))
//
// NV_PIN_USER_PAGES() passes in NULL for the vmas parameter (if required)
// in pin_user_pages() (or get_user_pages() if pin_user_pages() does not

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -137,25 +137,14 @@ NV_STATUS NV_API_CALL os_pci_write_dword(

NvBool NV_API_CALL os_pci_remove_supported(void)
{
#if defined NV_PCI_STOP_AND_REMOVE_BUS_DEVICE
return NV_TRUE;
#else
return NV_FALSE;
#endif
}

void NV_API_CALL os_pci_remove(
void *handle
)
{
#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE)
NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(handle);
#elif defined(DEBUG)
nv_printf(NV_DBG_ERRORS,
"NVRM: %s() is called even though NV_PCI_STOP_AND_REMOVE_BUS_DEVICE is not defined\n",
__FUNCTION__);
os_dbg_breakpoint();
#endif
pci_stop_and_remove_bus_device(handle);
}

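With the conftest gone, os_pci_remove() now calls pci_stop_and_remove_bus_device() unconditionally; the macro indirection dated from kernels where that function had a different name. In-kernel callers conventionally hold the rescan/remove lock around it — a hedged sketch of that convention (not necessarily what this driver's callers do):

    #include <linux/pci.h>

    static void remove_pci_device(struct pci_dev *pdev)
    {
        /* Serialize against concurrent rescan/remove of the PCI tree. */
        pci_lock_rescan_remove();
        pci_stop_and_remove_bus_device(pdev);
        pci_unlock_rescan_remove();
    }
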
NV_STATUS NV_API_CALL

@@ -331,6 +331,18 @@ NV_STATUS NV_API_CALL os_registry_init(void)
"RmMsg", NVreg_RmMsg, strlen(NVreg_RmMsg));
}

//
// CoherentGPUMemoryMode=driver just implies the older
// EnableUserNUMAManagement=0 option
//
if (NVreg_CoherentGPUMemoryMode != NULL)
{
if (strcmp(NVreg_CoherentGPUMemoryMode, "driver") == 0)
{
NVreg_EnableUserNUMAManagement = 0;
}
}

rm_parse_option_string(sp, NVreg_RegistryDwords);

for (i = 0; (entry = &nv_parms[i])->name != NULL; i++)

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,7 +39,7 @@ NV_STATUS NV_API_CALL os_match_mmap_offset(
{
if (at->flags.contig)
{
if (offset == (at->page_table[0]->phys_addr + (i * PAGE_SIZE)))
if (offset == (at->page_table[0].phys_addr + (i * PAGE_SIZE)))
{
*pPageIndex = i;
return NV_OK;
@@ -47,7 +47,7 @@ NV_STATUS NV_API_CALL os_match_mmap_offset(
}
else
{
if (offset == at->page_table[i]->phys_addr)
if (offset == at->page_table[i].phys_addr)
{
*pPageIndex = i;
return NV_OK;

@@ -23,6 +23,7 @@

#include "linux_nvswitch.h"
#include "nv-procfs.h"
#include "ioctl_nvswitch.h"

#include <linux/fs.h>

@@ -60,6 +61,7 @@ nv_procfs_read_device_info
)
{
NVSWITCH_DEV *nvswitch_dev = s->private;
char uuid_string[NVSWITCH_UUID_STRING_LENGTH] = { 0 };

if (!nvswitch_dev)
{
@@ -83,6 +85,11 @@ nv_procfs_read_device_info
seq_printf(s, "N/A\n");
}

nvswitch_uuid_to_string(&nvswitch_dev->uuid, uuid_string, NVSWITCH_UUID_STRING_LENGTH);

seq_printf(s, "UUID: %s\n", uuid_string);
seq_printf(s, "Physical location ID: %04x\n", nvswitch_dev->phys_id);

return 0;
}