575.51.02

This commit is contained in:
Bernhard Stoeckner
2025-04-17 19:35:38 +02:00
parent e8113f665d
commit 4159579888
1142 changed files with 309085 additions and 272273 deletions

View File

@@ -24,16 +24,15 @@
#ifndef __DETECT_SELF_HOSTED_H__
#define __DETECT_SELF_HOSTED_H__
// PCI devIds 0x2340-0x237f are for Self-Hosted Hopper
static inline int pci_devid_is_self_hosted_hopper(unsigned short devid)
{
    // Bug fix: the merged diff left two consecutive return statements
    // here; the duplicate (unreachable) one is removed.
    return devid >= 0x2340 && devid <= 0x237f; // GH100 Self-Hosted
}
// PCI devIds 0x2940-0x297f and 0x31c0-0x31ff are for Self-Hosted Blackwell
static inline int pci_devid_is_self_hosted_blackwell(unsigned short devid)
{
    // Bug fix: the merged diff left the pre-change return statement in
    // place, which made the extended (GB110) range unreachable.  Only
    // the extended check is kept.
    return (devid >= 0x2940 && devid <= 0x297f)   // GB100 Self-Hosted
        || (devid >= 0x31c0 && devid <= 0x31ff);  // GB110 Self-Hosted
}
static inline int pci_devid_is_self_hosted(unsigned short devid)

View File

@@ -669,6 +669,19 @@ nvswitch_os_print
...
);
/*
 * Log the given error code via an OS-specific programmatic API
 */
void
NVSWITCH_PRINT_ATTRIB(3, 4)
nvswitch_os_report_error
(
void *os_handle,
NvU32 error_code,
const char *fmt,
...
);
/*
* "Registry" interface for dword
*/

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -38,7 +38,7 @@ struct lkca_aead_ctx
};
#endif
int libspdm_aead_prealloc(void **context, char const *alg)
static int libspdm_aead_prealloc(void **context, char const *alg)
{
#ifndef USE_LKCA
return -ENODEV;
@@ -175,14 +175,14 @@ static int lkca_aead_internal(struct crypto_aead *aead,
}
#endif
int libspdm_aead_prealloced(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc)
static int libspdm_aead_prealloced(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc)
{
#ifndef USE_LKCA
return -ENODEV;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -50,6 +50,9 @@
#include "ioctl_nvswitch.h"
#define CREATE_TRACE_POINTS
#include "nvswitch_event.h"
static const struct
{
NvlStatus status;
@@ -1892,6 +1895,35 @@ nvswitch_os_print
va_end(arglist);
}
/*
 * Report an nvswitch error (SXID) to the OS by emitting the
 * nvswitch_dev_sxid kernel tracepoint with a formatted message.
 *
 * os_handle is the device's struct pci_dev; a NULL handle drops the
 * report.  The message is built from fmt/varargs into a temporary
 * kernel buffer, passed to the tracepoint, then freed.  Best effort:
 * allocation failure silently drops the report.
 */
void
nvswitch_os_report_error
(
    void *os_handle,
    NvU32 error_code,
    const char *fmt,
    ...
)
{
    va_list arglist;
    char *buffer;
    // Use an atomic allocation when we may not sleep in this context.
    gfp_t gfp = NV_MAY_SLEEP() ? NV_GFP_NO_OOM : NV_GFP_ATOMIC;
    struct pci_dev *pdev = (struct pci_dev *)os_handle;

    if (pdev == NULL)
        return;

    va_start(arglist, fmt);
    buffer = kvasprintf(gfp, fmt, arglist);
    va_end(arglist);

    // Allocation failed: drop the report rather than crash the path.
    if (buffer == NULL)
        return;

    trace_nvswitch_dev_sxid(pdev, error_code, buffer);
    kfree(buffer);
}
void
nvswitch_os_override_platform
(

View File

@@ -0,0 +1,81 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include <linux/backlight.h>
#include "os-interface.h"
#include "nv-linux.h"
/*
 * Query the current brightness level of the Tegra backlight device
 * registered under nvl->backlight.device_name.
 *
 * Returns NV_OK on success, NV_ERR_GENERIC when the backlight device
 * cannot be looked up, NV_ERR_NOT_SUPPORTED when the kernel lacks
 * get_backlight_device_by_name().
 */
NV_STATUS NV_API_CALL nv_get_tegra_brightness_level
(
    nv_state_t *nv,
    NvU32 *brightness
)
{
#ifdef NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct backlight_device *backlight_dev =
        get_backlight_device_by_name(nvl->backlight.device_name);

    if (backlight_dev != NULL)
    {
        *brightness = backlight_dev->props.brightness;
        return NV_OK;
    }

    nv_printf(NV_DBG_ERRORS, "Unable to get backlight device\n");
    return NV_ERR_GENERIC;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
/*
 * Program a new brightness level on the Tegra backlight device and
 * push it to the hardware via backlight_update_status().
 *
 * Returns NV_OK on success, NV_ERR_GENERIC when the backlight device
 * cannot be looked up, NV_ERR_NOT_SUPPORTED when the kernel lacks
 * get_backlight_device_by_name().
 */
NV_STATUS NV_API_CALL nv_set_tegra_brightness_level
(
    nv_state_t *nv,
    NvU32 brightness
)
{
#ifdef NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct backlight_device *backlight_dev =
        get_backlight_device_by_name(nvl->backlight.device_name);

    if (backlight_dev == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "Unable to get backlight device\n");
        return NV_ERR_GENERIC;
    }

    backlight_dev->props.brightness = brightness;
    backlight_update_status(backlight_dev);
    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}

View File

@@ -0,0 +1,28 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"

877
kernel-open/nvidia/nv-clk.c Normal file
View File

@@ -0,0 +1,877 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-platform.h"
#if defined(NV_SOC_TEGRA_BPMP_ABI_H_PRESENT)
#include <soc/tegra/bpmp-abi.h>
#endif
#if defined(NV_SOC_TEGRA_BPMP_H_PRESENT)
#include <soc/tegra/bpmp.h>
#endif
// Use the CCF APIs if enabled in Kernel config and RM build
// has Dual license define enabled.
#if defined(CONFIG_COMMON_CLK)
#define HAS_COMMON_CLOCK_FRAMEWORK 1
#else
#define HAS_COMMON_CLOCK_FRAMEWORK 0
#endif
#if HAS_COMMON_CLOCK_FRAMEWORK
#if defined(NV_DEVM_CLK_BULK_GET_ALL_PRESENT)
/*!
* @brief The below defined static const array points to the
* clock mentioned in enum defined in below file.
*
* arch/nvalloc/unix/include/nv.h
* enum TEGRASOC_WHICH_CLK
*
* The order should be maintained/updated together.
*/
static const char *osMapClk[] = {
[TEGRASOC_WHICH_CLK_NVDISPLAYHUB] = "nvdisplayhub_clk",
[TEGRASOC_WHICH_CLK_NVDISPLAY_DISP] = "nvdisplay_disp_clk",
[TEGRASOC_WHICH_CLK_NVDISPLAY_P0] = "nvdisplay_p0_clk",
[TEGRASOC_WHICH_CLK_NVDISPLAY_P1] = "nvdisplay_p1_clk",
[TEGRASOC_WHICH_CLK_DPAUX0] = "dpaux0_clk",
[TEGRASOC_WHICH_CLK_FUSE] = "fuse_clk",
[TEGRASOC_WHICH_CLK_DSIPLL_VCO] = "dsipll_vco_clk",
[TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN] = "dsipll_clkoutpn_clk",
[TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA] = "dsipll_clkouta_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_VCO] = "sppll0_vco_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA] = "sppll0_clkouta_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB] = "sppll0_clkoutb_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN] = "sppll0_clkoutpn_clk",
[TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN] = "sppll1_clkoutpn_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_DIV27] = "sppll0_div27_clk",
[TEGRASOC_WHICH_CLK_SPPLL1_DIV27] = "sppll1_div27_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_DIV10] = "sppll0_div10_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_DIV25] = "sppll0_div25_clk",
[TEGRASOC_WHICH_CLK_SPPLL1_VCO] = "sppll1_vco_clk",
[TEGRASOC_WHICH_CLK_VPLL0_REF] = "vpll0_ref_clk",
[TEGRASOC_WHICH_CLK_VPLL0] = "vpll0_clk",
[TEGRASOC_WHICH_CLK_VPLL1] = "vpll1_clk",
[TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF] = "nvdisplay_p0_ref_clk",
[TEGRASOC_WHICH_CLK_RG0] = "rg0_clk",
[TEGRASOC_WHICH_CLK_RG1] = "rg1_clk",
[TEGRASOC_WHICH_CLK_DISPPLL] = "disppll_clk",
[TEGRASOC_WHICH_CLK_DISPHUBPLL] = "disphubpll_clk",
[TEGRASOC_WHICH_CLK_DSI_LP] = "dsi_lp_clk",
[TEGRASOC_WHICH_CLK_DSI_CORE] = "dsi_core_clk",
[TEGRASOC_WHICH_CLK_DSI_PIXEL] = "dsi_pixel_clk",
[TEGRASOC_WHICH_CLK_PRE_SOR0] = "pre_sor0_clk",
[TEGRASOC_WHICH_CLK_PRE_SOR1] = "pre_sor1_clk",
[TEGRASOC_WHICH_CLK_DP_LINKA_REF] = "dp_link_ref_clk",
[TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT] = "sor_linka_input_clk",
[TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO] = "sor_linka_afifo_clk",
[TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M] = "sor_linka_afifo_m_clk",
[TEGRASOC_WHICH_CLK_RG0_M] = "rg0_m_clk",
[TEGRASOC_WHICH_CLK_RG1_M] = "rg1_m_clk",
[TEGRASOC_WHICH_CLK_SOR0_M] = "sor0_m_clk",
[TEGRASOC_WHICH_CLK_SOR1_M] = "sor1_m_clk",
[TEGRASOC_WHICH_CLK_PLLHUB] = "pllhub_clk",
[TEGRASOC_WHICH_CLK_SOR0] = "sor0_clk",
[TEGRASOC_WHICH_CLK_SOR1] = "sor1_clk",
[TEGRASOC_WHICH_CLK_SOR_PADA_INPUT] = "sor_pad_input_clk",
[TEGRASOC_WHICH_CLK_PRE_SF0] = "pre_sf0_clk",
[TEGRASOC_WHICH_CLK_SF0] = "sf0_clk",
[TEGRASOC_WHICH_CLK_SF1] = "sf1_clk",
[TEGRASOC_WHICH_CLK_DSI_PAD_INPUT] = "dsi_pad_input_clk",
[TEGRASOC_WHICH_CLK_PRE_SOR0_REF] = "pre_sor0_ref_clk",
[TEGRASOC_WHICH_CLK_PRE_SOR1_REF] = "pre_sor1_ref_clk",
[TEGRASOC_WHICH_CLK_SOR0_PLL_REF] = "sor0_ref_pll_clk",
[TEGRASOC_WHICH_CLK_SOR1_PLL_REF] = "sor1_ref_pll_clk",
[TEGRASOC_WHICH_CLK_SOR0_REF] = "sor0_ref_clk",
[TEGRASOC_WHICH_CLK_SOR1_REF] = "sor1_ref_clk",
[TEGRASOC_WHICH_CLK_OSC] = "osc_clk",
[TEGRASOC_WHICH_CLK_DSC] = "dsc_clk",
[TEGRASOC_WHICH_CLK_MAUD] = "maud_clk",
[TEGRASOC_WHICH_CLK_AZA_2XBIT] = "aza_2xbit_clk",
[TEGRASOC_WHICH_CLK_AZA_BIT] = "aza_bit_clk",
[TEGRASOC_WHICH_CLK_MIPI_CAL] = "mipi_cal_clk",
[TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL] = "uart_fst_mipi_cal_clk",
[TEGRASOC_WHICH_CLK_SOR0_DIV] = "sor0_div_clk",
[TEGRASOC_WHICH_CLK_DISP_ROOT] = "disp_root",
[TEGRASOC_WHICH_CLK_HUB_ROOT] = "hub_root",
[TEGRASOC_WHICH_CLK_PLLA_DISP] = "plla_disp",
[TEGRASOC_WHICH_CLK_PLLA_DISPHUB] = "plla_disphub",
[TEGRASOC_WHICH_CLK_PLLA] = "plla",
[TEGRASOC_WHICH_CLK_EMC] = "emc_clk",
[TEGRASOC_WHICH_CLK_GPU_SYS] = "sysclk",
[TEGRASOC_WHICH_CLK_GPU_NVD] = "nvdclk",
[TEGRASOC_WHICH_CLK_GPU_UPROC] = "uprocclk",
[TEGRASOC_WHICH_CLK_GPU_GPC0] = "gpc0clk",
[TEGRASOC_WHICH_CLK_GPU_GPC1] = "gpc1clk",
[TEGRASOC_WHICH_CLK_GPU_GPC2] = "gpc2clk",
};
#endif
/*!
 * @brief Get the clock handles.
 *
 * Look up and obtain the clock handles for each display
 * clock at boot-time; those handles are then used for the
 * rest of the clock operations — enable/disable clocks,
 * get current/max frequency of the clock.
 *
 * For more details on CCF functions, please check below file:
 *
 * In the Linux kernel: include/linux/clk.h
 * or
 * https://www.kernel.org/doc/htmldocs/kernel-api/
 *
 * @param[in] nv Per gpu linux state
 *
 * @returns NV_OK on success,
 *          NV_ERR_OBJECT_NOT_FOUND when no clocks (or an unknown
 *          clock name) are found,
 *          NV_ERR_FEATURE_NOT_ENABLED when devm_clk_bulk_get_all()
 *          is not available.
 */
NV_STATUS NV_API_CALL nv_clk_get_handles(
    nv_state_t *nv)
{
    NV_STATUS status = NV_OK;
#if defined(NV_DEVM_CLK_BULK_GET_ALL_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvU32 i, j;
    int clk_count;
    struct clk_bulk_data *clks;

    clk_count = devm_clk_bulk_get_all(nvl->dev, &clks);
    if (clk_count <= 0)
    {
        //
        // Bug fix: return immediately on failure.  'clks' is not valid
        // here, and the previous fall-through compared the (possibly
        // negative) signed clk_count against the unsigned index below,
        // which wraps to a huge bound and walks an invalid array.
        //
        nv_printf(NV_DBG_INFO,"NVRM: No clk handles for the dev\n");
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    //
    // TEGRASOC_WHICH_CLK_MAX is maximum clock defined in below enum
    // arch/nvalloc/unix/include/nv.h
    // enum TEGRASOC_WHICH_CLK
    //
    for (i = 0U; i < (NvU32)clk_count; i++)
    {
        for (j = 0U; j < TEGRASOC_WHICH_CLK_MAX; j++)
        {
            if (!strcmp(osMapClk[j], clks[i].id))
            {
                nvl->soc_clk_handles.clk[j].handles = clks[i].clk;
                nvl->soc_clk_handles.clk[j].clkName = __clk_get_name(clks[i].clk);
                break;
            }
        }
        // Device tree named a clock we have no enum slot for.
        if (j == TEGRASOC_WHICH_CLK_MAX)
        {
            nv_printf(NV_DBG_ERRORS,"NVRM: nv_clk_get_handles, failed to find TEGRA_SOC_WHICH_CLK for %s\n", clks[i].id);
            return NV_ERR_OBJECT_NOT_FOUND;
        }
    }
#else
    nv_printf(NV_DBG_INFO, "NVRM: devm_clk_bulk_get_all API is not present\n");
    status = NV_ERR_FEATURE_NOT_ENABLED;
#endif
    return status;
}
/*!
 * @brief Enable the clock.
 *
 * Prepare the clock for use and enable it; must be done before
 * performing any other operation on the clock.
 *
 * For more details on CCF functions, please check below file:
 *
 * In the Linux kernel: include/linux/clk.h
 * or
 * https://www.kernel.org/doc/htmldocs/kernel-api/
 *
 * @param[in] nv          Per gpu linux state
 * @param[in] whichClkOS  Enum value of the target clock
 *
 * @returns NV_STATUS
 */
NV_STATUS NV_API_CALL nv_enable_clk(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc;

    // No handle was resolved for this clock by nv_clk_get_handles().
    if (nvl->soc_clk_handles.clk[whichClkOS].handles == NULL)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    rc = clk_prepare_enable(nvl->soc_clk_handles.clk[whichClkOS].handles);
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clk_prepare_enable failed with error: %d\n", rc);
        return NV_ERR_FEATURE_NOT_ENABLED;
    }

    return NV_OK;
}
/*!
 * @brief Disable the clock.
 *
 * Disabling the clock after performing operation or required
 * work with that clock is done with that particular clock.
 * The below function will unprepare the clock for further use
 * and disable them.
 *
 * Note: make sure to disable clock before clk_put is called.
 *
 * NOTE(review): unlike nv_enable_clk() there is no NULL-handle
 * check here; the kernel's clk_disable_unprepare() treats a NULL
 * clk as a no-op, so this appears to rely on that — confirm.
 *
 * For more details on CCF functions, please check below file:
 *
 * In the Linux kernel: include/linux/clk.h
 * or
 * https://www.kernel.org/doc/htmldocs/kernel-api/
 *
 * @param[in] nv          Per gpu linux state
 * @param[in] whichClkOS  Enum value of the target clock
 */
void NV_API_CALL nv_disable_clk(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    clk_disable_unprepare(nvl->soc_clk_handles.clk[whichClkOS].handles);
}
/*!
 * @brief Get current clock frequency.
 *
 * Obtain the current clock rate for a clock source.
 * This is only valid once the clock source has been enabled.
 *
 * For more details on CCF functions, please check below file:
 *
 * In the Linux kernel: include/linux/clk.h
 * or
 * https://www.kernel.org/doc/htmldocs/kernel-api/
 *
 * @param[in]  nv           Per gpu linux state
 * @param[in]  whichClkOS   Enum value of the target clock
 * @param[out] pCurrFreqKHz Current clock frequency in KHz
 */
NV_STATUS NV_API_CALL nv_get_curr_freq(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pCurrFreqKHz)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *handle = nvl->soc_clk_handles.clk[whichClkOS].handles;

    if (handle == NULL)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    // clk_get_rate() reports Hz; callers expect KHz.
    *pCurrFreqKHz = clk_get_rate(handle) / 1000U;

    // A zero rate indicates the clock is not running yet.
    return (*pCurrFreqKHz > 0U) ? NV_OK : NV_ERR_FEATURE_NOT_ENABLED;
}
/*!
 * @brief Get maximum clock frequency.
 *
 * Obtain the maximum clock rate a clock source can provide.
 * This is only valid once the clock source has been enabled.
 *
 * For more details on CCF functions, please check below file:
 *
 * In the Linux kernel: include/linux/clk.h
 * or
 * https://www.kernel.org/doc/htmldocs/kernel-api/
 *
 * @param[in]  nv          Per gpu linux state
 * @param[in]  whichClkOS  Enum value of the target clock
 * @param[out] pMaxFreqKHz Maximum clock frequency in KHz
 */
NV_STATUS NV_API_CALL nv_get_max_freq(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pMaxFreqKHz)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status;
    long ret;

    if (nvl->soc_clk_handles.clk[whichClkOS].handles != NULL)
    {
        //
        // clk_round_rate(clk, rate) returns the rate (in Hz) closest
        // to 'rate' that the clock can actually provide, or a
        // negative errno.  Requesting the largest rate we pass here
        // (NV_U32_MAX) therefore yields the maximum supported rate:
        // 0 < currFreq <= maxFreq <= NV_U32_MAX.
        //
        ret = clk_round_rate(nvl->soc_clk_handles.clk[whichClkOS].handles, NV_U32_MAX);
        if (ret >= 0)
        {
            *pMaxFreqKHz = (NvU32) (ret / 1000);
            status = NV_OK;
        }
        else
        {
            status = NV_ERR_FEATURE_NOT_ENABLED;
            nv_printf(NV_DBG_ERRORS, "NVRM: clk_round_rate failed with error: %ld\n", ret);
        }
    }
    else
    {
        status = NV_ERR_OBJECT_NOT_FOUND;
    }
    return status;
}
/*!
 * @brief Get minimum clock frequency.
 *
 * Obtain the minimum clock rate a clock source can provide.
 * This is only valid once the clock source has been enabled.
 *
 * For more details on CCF functions, please check below file:
 *
 * In the Linux kernel: include/linux/clk.h
 * or
 * https://www.kernel.org/doc/htmldocs/kernel-api/
 *
 * @param[in]  nv          Per gpu linux state
 * @param[in]  whichClkOS  Enum value of the target clock
 * @param[out] pMinFreqKHz Minimum clock frequency in KHz
 */
NV_STATUS NV_API_CALL nv_get_min_freq(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pMinFreqKHz)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status;
    long ret;

    if (nvl->soc_clk_handles.clk[whichClkOS].handles != NULL)
    {
        //
        // clk_round_rate(clk, rate) returns the rate (in Hz) closest
        // to 'rate' that the clock can actually provide, or a
        // negative errno.  Requesting a rate of 0 therefore yields
        // the minimum supported rate:
        // 0 <= minFreq <= currFreq <= maxFreq.
        //
        ret = clk_round_rate(nvl->soc_clk_handles.clk[whichClkOS].handles, 0);
        if (ret >= 0)
        {
            *pMinFreqKHz = (NvU32) (ret / 1000);
            status = NV_OK;
        }
        else
        {
            status = NV_ERR_FEATURE_NOT_ENABLED;
            nv_printf(NV_DBG_ERRORS, "NVRM: clk_round_rate failed with error: %ld\n", ret);
        }
    }
    else
    {
        status = NV_ERR_OBJECT_NOT_FOUND;
    }
    return status;
}
/*!
 * @brief Set clock frequency.
 *
 * Set the rate of a clock source.  This is only valid once the
 * clock source has been enabled.
 *
 * For more details on CCF functions, please check below file:
 *
 * In the Linux kernel: include/linux/clk.h
 * or
 * https://www.kernel.org/doc/htmldocs/kernel-api/
 *
 * @param[in] nv          Per gpu linux state
 * @param[in] whichClkOS  Enum value of the target clock
 * @param[in] reqFreqKHz  Requested frequency in KHz
 */
NV_STATUS NV_API_CALL nv_set_freq(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 reqFreqKHz)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *handle = nvl->soc_clk_handles.clk[whichClkOS].handles;
    int rc;

    if (handle == NULL)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    // clk_set_rate() takes Hz.
    rc = clk_set_rate(handle, reqFreqKHz * 1000U);
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clk_set_rate failed with error: %d\n", rc);
        return NV_ERR_INVALID_REQUEST;
    }

    return NV_OK;
}
#else
/*
 * Stub implementations used when the Common Clock Framework (or the
 * devm_clk_bulk_get_all() API) is not available in this build: every
 * clock entry point reports NV_ERR_NOT_SUPPORTED, and nv_disable_clk
 * is a no-op.
 */
NV_STATUS NV_API_CALL nv_clk_get_handles
(
    nv_state_t *nv
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_enable_clk
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOS
)
{
    return NV_ERR_NOT_SUPPORTED;
}

void NV_API_CALL nv_disable_clk
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOS
)
{
    return;
}

NV_STATUS NV_API_CALL nv_get_curr_freq
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOS,
    NvU32 *pCurrFreqKHz
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_get_max_freq
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOS,
    NvU32 *pMaxFreqKHz
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_get_min_freq
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOS,
    NvU32 *pMinFreqKHz
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_set_freq
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOS,
    NvU32 freqKHz
)
{
    return NV_ERR_NOT_SUPPORTED;
}
#endif
/*!
 * @brief Clear the clock handles assigned by nv_clk_get_handles().
 *
 * Drop the cached handle for every clock at shutdown-time.  The
 * handles were obtained via the devm-managed devm_clk_bulk_get_all()
 * API, so the underlying clk references are released automatically
 * when the module is unloaded — nothing needs to be freed here.
 *
 * For more details on CCF functions, please check below file:
 *
 * In the Linux kernel: include/linux/clk.h
 * or
 * https://www.kernel.org/doc/htmldocs/kernel-api/
 *
 * @param[in] nv Per gpu linux state
 */
void NV_API_CALL nv_clk_clear_handles(
    nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvU32 idx;

    //
    // TEGRASOC_WHICH_CLK_MAX is the number of clocks defined in
    // enum TEGRASOC_WHICH_CLK (arch/nvalloc/unix/include/nv.h).
    // Clearing an already-NULL slot is harmless, so no check needed.
    //
    for (idx = 0U; idx < TEGRASOC_WHICH_CLK_MAX; idx++)
    {
        nvl->soc_clk_handles.clk[idx].handles = NULL;
    }
}
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
/*!
 * @brief Set parent clock.
 *
 * Reparent the source clock under the given parent clock.  This is
 * only valid once both the source and the parent clock have been
 * enabled.
 *
 * For more details on CCF functions, please check below file:
 *
 * In the Linux kernel: include/linux/clk.h
 * or
 * https://www.kernel.org/doc/htmldocs/kernel-api/
 *
 * @param[in] nv                Per gpu linux state
 * @param[in] whichClkOSsource  Enum value of the source clock
 * @param[in] whichClkOSparent  Enum value of the parent clock
 */
NV_STATUS NV_API_CALL nv_set_parent
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOSsource,
    TEGRASOC_WHICH_CLK whichClkOSparent
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *child = nvl->soc_clk_handles.clk[whichClkOSsource].handles;
    struct clk *parent = nvl->soc_clk_handles.clk[whichClkOSparent].handles;
    int rc;

    // Both handles must have been resolved by nv_clk_get_handles().
    if ((child == NULL) || (parent == NULL))
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    rc = clk_set_parent(child, parent);
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clk_set_parent failed with error: %d\n", rc);
        return NV_ERR_INVALID_REQUEST;
    }

    return NV_OK;
}
/*!
 * @brief Get parent clock.
 *
 * Look up the parent of a clock source and translate it back to a
 * TEGRASOC_WHICH_CLK enum value by matching the parent's CCF name
 * against the names cached in soc_clk_handles.  This is only valid
 * once the clock source and the parent clock have been enabled.
 *
 * For more details on CCF functions, please check below file:
 *
 * In the Linux kernel: include/linux/clk.h
 * or
 * https://www.kernel.org/doc/htmldocs/kernel-api/
 *
 * @param[in]  nv                 Per gpu linux state
 * @param[in]  whichClkOSsource   Enum value of the source clock
 * @param[out] pWhichClkOSparent  Receives enum value of the parent clock
 */
NV_STATUS NV_API_CALL nv_get_parent
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOSsource,
    TEGRASOC_WHICH_CLK *pWhichClkOSparent
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *ret;
    NvU32 i;

    if (nvl->soc_clk_handles.clk[whichClkOSsource].handles != NULL)
    {
        ret = clk_get_parent(nvl->soc_clk_handles.clk[whichClkOSsource].handles);
        if (!IS_ERR_OR_NULL(ret))
        {
            // Map the parent's CCF name back to our enum.
            const char *parentClkName = __clk_get_name(ret);
            //
            // TEGRASOC_WHICH_CLK_MAX is maximum clock defined in below enum
            // arch/nvalloc/unix/include/nv.h
            // enum TEGRASOC_WHICH_CLK
            //
            for (i = 0U; i < TEGRASOC_WHICH_CLK_MAX; i++)
            {
                //
                // soc_clk_handles has array of clks supported on all chips.
                // So depending on the chip, some clks may not be present.
                //
                if (nvl->soc_clk_handles.clk[i].clkName == NULL)
                {
                    continue;
                }
                if (!strcmp(nvl->soc_clk_handles.clk[i].clkName, parentClkName))
                {
                    *pWhichClkOSparent = i;
                    return NV_OK;
                }
            }
            // Parent exists in CCF but is not a clock we track.
            nv_printf(NV_DBG_ERRORS, "NVRM: unexpected parent clock ref addr: %p\n", ret);
            return NV_ERR_INVALID_OBJECT_PARENT;
        }
        else
        {
            nv_printf(NV_DBG_ERRORS, "NVRM: clk_get_parent failed with error: %ld\n", PTR_ERR(ret));
            return NV_ERR_INVALID_POINTER;
        }
    }

    nv_printf(NV_DBG_ERRORS, "NVRM: invalid source clock requested\n");
    return NV_ERR_OBJECT_NOT_FOUND;
}
/*!
 * @brief Check if a clock is enabled.
 *
 * Query the clock's enabled state before enabling or disabling it.
 *
 * For more details on CCF functions, please check below file:
 *
 * In the Linux kernel: include/linux/clk.h
 * or
 * https://www.kernel.org/doc/htmldocs/kernel-api/
 *
 * @param[in] nv          Per gpu linux state
 * @param[in] whichClkOS  Enum value of the target clock
 *
 * @returns NV_TRUE when the clock is enabled, NV_FALSE otherwise
 *          (including when no handle exists for the clock).
 */
NvBool NV_API_CALL nv_is_clk_enabled(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *handle = nvl->soc_clk_handles.clk[whichClkOS].handles;

    if (handle == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clock handle requested not found.\n");
        return NV_FALSE;
    }

    return __clk_is_enabled(handle) ? NV_TRUE : NV_FALSE;
}
/*!
 * @brief Initialize the DP UPHY PLL via a BPMP MRQ_UPHY request.
 *
 * @param[in] nv            Per gpu linux state
 * @param[in] link_rate     DP link rate to program
 * @param[in] lanes_bitmap  Bitmap of lanes to bring up
 *
 * @returns NV_OK on success, NV_ERR_GENERIC on BPMP failure,
 *          NV_ERR_NOT_SUPPORTED when BPMP support is not compiled in.
 */
NV_STATUS NV_API_CALL nv_dp_uphy_pll_init
(
    nv_state_t *nv,
    NvU32 link_rate,
    NvU32 lanes_bitmap
)
{
#if defined(NV_SOC_TEGRA_BPMP_ABI_H_PRESENT) && defined(NV_CMD_UPHY_DISPLAY_PORT_INIT_PRESENT)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct tegra_bpmp *bpmp;
    struct tegra_bpmp_message msg;
    struct mrq_uphy_request req;
    struct mrq_uphy_response resp;
    int rc;
    NV_STATUS status = NV_OK;

    bpmp = tegra_bpmp_get(nvl->dev);
    if (IS_ERR(bpmp))
    {
        //
        // Bug fix: PTR_ERR() yields a long error code, not a string;
        // the previous "%s" specifier would dereference the error
        // value as a pointer.
        //
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: Error getting bpmp struct: %ld\n",
                  PTR_ERR(bpmp));
        return NV_ERR_GENERIC;
    }

    // Zero the request so unused union members are not sent as garbage.
    memset(&req, 0, sizeof(req));
    req.cmd = CMD_UPHY_DISPLAY_PORT_INIT;
    req.display_port_init.link_rate = link_rate;
    req.display_port_init.lanes_bitmap = lanes_bitmap;

    memset(&msg, 0, sizeof(msg));
    msg.mrq = MRQ_UPHY;
    msg.tx.data = &req;
    msg.tx.size = sizeof(req);
    msg.rx.data = &resp;
    msg.rx.size = sizeof(resp);

    rc = tegra_bpmp_transfer(bpmp, &msg);
    if (rc)
    {
        nv_printf(NV_DBG_ERRORS, "DP UPHY pll initialization failed, rc - %d\n", rc);
        status = NV_ERR_GENERIC;
    }

    tegra_bpmp_put(bpmp);
    return status;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
/*!
 * @brief Power down the DP UPHY PLL via a BPMP MRQ_UPHY request.
 *
 * @param[in] nv  Per gpu linux state
 *
 * @returns NV_OK on success, NV_ERR_GENERIC on BPMP failure,
 *          NV_ERR_NOT_SUPPORTED when BPMP support is not compiled in.
 */
NV_STATUS NV_API_CALL nv_dp_uphy_pll_deinit(nv_state_t *nv)
{
#if defined(NV_SOC_TEGRA_BPMP_ABI_H_PRESENT) && defined(NV_CMD_UPHY_DISPLAY_PORT_OFF_PRESENT)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct tegra_bpmp *bpmp;
    struct tegra_bpmp_message msg;
    struct mrq_uphy_request req;
    struct mrq_uphy_response resp;
    int rc;
    NV_STATUS status = NV_OK;

    bpmp = tegra_bpmp_get(nvl->dev);
    if (IS_ERR(bpmp))
    {
        //
        // Bug fix: PTR_ERR() yields a long error code, not a string;
        // the previous "%s" specifier would dereference the error
        // value as a pointer.
        //
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: Error getting bpmp struct: %ld\n",
                  PTR_ERR(bpmp));
        return NV_ERR_GENERIC;
    }

    // Zero the request so unused union members are not sent as garbage.
    memset(&req, 0, sizeof(req));
    req.cmd = CMD_UPHY_DISPLAY_PORT_OFF;

    memset(&msg, 0, sizeof(msg));
    msg.mrq = MRQ_UPHY;
    msg.tx.data = &req;
    msg.tx.size = sizeof(req);
    msg.rx.data = &resp;
    msg.rx.size = sizeof(resp);

    rc = tegra_bpmp_transfer(bpmp, &msg);
    if (rc)
    {
        nv_printf(NV_DBG_ERRORS, "DP UPHY pll de-initialization failed, rc - %d\n", rc);
        status = NV_ERR_GENERIC;
    }

    tegra_bpmp_put(bpmp);
    return status;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
#else
/*
 * Stub implementations used when platform display device support is
 * not compiled in (NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE is false):
 * each entry point reports NV_ERR_NOT_SUPPORTED / NV_FALSE.
 */
NV_STATUS NV_API_CALL nv_set_parent
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOSsource,
    TEGRASOC_WHICH_CLK whichClkOSparent
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NvBool NV_API_CALL nv_is_clk_enabled(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
    return NV_FALSE;
}

NV_STATUS NV_API_CALL nv_dp_uphy_pll_deinit(nv_state_t *nv)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_get_parent
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOSsource,
    TEGRASOC_WHICH_CLK *pWhichClkOSparent
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_dp_uphy_pll_init
(
    nv_state_t *nv,
    NvU32 link_rate,
    NvU32 lanes_bitmap
)
{
    return NV_ERR_NOT_SUPPORTED;
}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,273 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 - 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "os_gpio.h"
#define NV_GPIOF_DIR_IN (1 << 0)
/*!
* @brief Mapping array of OS GPIO function ID to OS function name,
* this name is used to get GPIO number from Device Tree.
*/
static const char *osMapGpioFunc[] = {
[NV_OS_GPIO_FUNC_HOTPLUG_A] = "os_gpio_hotplug_a",
[NV_OS_GPIO_FUNC_HOTPLUG_B] = "os_gpio_hotplug_b",
};
/*
 * Read the current logic level of a GPIO pin.
 *
 * @param[in]  nv        Per gpu linux state (unused here)
 * @param[in]  pinNum    Kernel GPIO number (see nv_gpio_get_pin_number)
 * @param[out] pinValue  Receives the pin level
 *
 * Returns NV_OK on success, NV_ERR_GENERIC when the kernel API is
 * unavailable or reports an error.
 */
NV_STATUS NV_API_CALL nv_gpio_get_pin_state
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 *pinValue
)
{
    int ret;

#if defined(NV_GPIO_GET_VALUE_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
    ret = gpio_get_value(pinNum);
#else
    nv_printf(NV_DBG_ERRORS, "gpio_get_value not present\n");
    return NV_ERR_GENERIC;
#endif

    if (ret < 0)
    {
        nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n",
                  __func__, ret);
        return NV_ERR_GENERIC;
    }

    *pinValue = ret;

    return NV_OK;
}
/*
 * Drive a GPIO pin to the given logic level.  Best effort: when the
 * kernel API is unavailable this only logs an error.
 *
 * @param[in] nv        Per gpu linux state (unused here)
 * @param[in] pinNum    Kernel GPIO number
 * @param[in] pinValue  Level to drive
 */
void NV_API_CALL nv_gpio_set_pin_state
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 pinValue
)
{
#if defined(NV_GPIO_SET_VALUE_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
    gpio_set_value(pinNum, pinValue);
#else
    nv_printf(NV_DBG_ERRORS, "gpio_set_value not present\n");
#endif
}
/*
 * Configure a GPIO pin as input (direction != 0) or output
 * (direction == 0; the output is initialized low).
 *
 * Returns NV_OK on success, NV_ERR_GENERIC when the kernel API is
 * unavailable or reports an error.
 */
NV_STATUS NV_API_CALL nv_gpio_set_pin_direction
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 direction
)
{
    int ret;

    if (direction)
    {
#if defined(NV_GPIO_DIRECTION_INPUT_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
        ret = gpio_direction_input(pinNum);
#else
        nv_printf(NV_DBG_ERRORS, "gpio_direction_input not present\n");
        return NV_ERR_GENERIC;
#endif
    }
    else
    {
#if defined(NV_GPIO_DIRECTION_OUTPUT_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
        // Output pins start driven low.
        ret = gpio_direction_output(pinNum, 0);
#else
        nv_printf(NV_DBG_ERRORS, "gpio_direction_output not present\n");
        return NV_ERR_GENERIC;
#endif
    }

    if (ret)
    {
        nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n",
                  __func__, ret);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
}
/*!
 * @brief Query the direction of a GPIO pin (not yet implemented).
 *
 * @param[in]  nv         Per GPU Linux state (unused)
 * @param[in]  pinNum     GPIO pin number (currently unused)
 * @param[out] direction  Would receive the direction; NOT written today
 *
 * NOTE(review): the implementation is compiled out (#if 0) until a
 * gpio_get_direction wrapper exists in the kernel, so this always
 * returns NV_OK and leaves *direction untouched — callers must not
 * rely on the output value yet.
 *
 * @returns NV_OK unconditionally (implementation pending)
 */
NV_STATUS NV_API_CALL nv_gpio_get_pin_direction
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 *direction
)
{
    /*!
     * TODO: Commenting out until gpio_get_direction wrapper
     * support is added in kernel.
     */
#if 0
    int ret;
    ret = nv_gpio_get_direction(pinNum);
    if (ret)
    {
        nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n",
                  __func__, ret);
        return NV_ERR_GENERIC;
    }
    *direction = ret;
#endif
    return NV_OK;
}
/*!
 * @brief Resolve and reserve the GPIO pin assigned to an OS GPIO function.
 *
 * Maps @p function to its Device Tree name via osMapGpioFunc, queries the
 * pin number with of_get_named_gpio(), and reserves the pin as an input
 * with devm_gpio_request_one() so it is released automatically with the
 * device.
 *
 * @param[in]  nv        Per GPU Linux state
 * @param[in]  function  NV_OS_GPIO_FUNC_* identifier
 * @param[out] pinNum    Receives the resolved GPIO pin number
 *
 * @returns NV_OK on success;
 *          NV_ERR_INVALID_ARGUMENT if @p function has no mapped DT name;
 *          NV_ERR_GENERIC on lookup/request failure or missing kernel APIs.
 */
NV_STATUS NV_API_CALL nv_gpio_get_pin_number
(
    nv_state_t *nv,
    NvU32 function,
    NvU32 *pinNum
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc;

    (void)nvl;

    /*
     * Validate the function ID before indexing osMapGpioFunc: an ID past
     * the table end is an out-of-bounds read, and gaps in the designated
     * initializer list are NULL (which would crash of_get_named_gpio).
     */
    if ((function >= (sizeof(osMapGpioFunc) / sizeof(osMapGpioFunc[0]))) ||
        (osMapGpioFunc[function] == NULL))
    {
        nv_printf(NV_DBG_ERRORS, "%s: invalid GPIO function: %u\n",
                  __func__, function);
        return NV_ERR_INVALID_ARGUMENT;
    }

#if defined(NV_OF_GET_NAME_GPIO_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
    rc = of_get_named_gpio(nvl->dev->of_node, osMapGpioFunc[function], 0);
#else
    nv_printf(NV_DBG_ERRORS, "of_get_named_gpio not present\n");
    return NV_ERR_GENERIC;
#endif
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "of_get_name_gpio failed for gpio - %s, rc - %d\n",
                  osMapGpioFunc[function], rc);
        return NV_ERR_GENERIC;
    }

    *pinNum = rc;

#if defined(NV_DEVM_GPIO_REQUEST_ONE_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
    rc = devm_gpio_request_one(nvl->dev, *pinNum, NV_GPIOF_DIR_IN,
                               osMapGpioFunc[function]);
#else
    nv_printf(NV_DBG_ERRORS, "devm_gpio_request_one not present\n");
    return NV_ERR_GENERIC;
#endif
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "request gpio failed for gpio - %s, rc - %d\n",
                  osMapGpioFunc[function], rc);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
}
/*!
 * @brief Check whether the currently pending SOC interrupt belongs to the
 *        given GPIO pin at the expected level.
 *
 * @param[in] nv         Per GPU Linux state
 * @param[in] pinNum     GPIO pin number to match against the pending IRQ
 * @param[in] direction  Expected pin level (0 or 1)
 *
 * @returns NV_TRUE when the pending IRQ is a GPIO IRQ for @p pinNum and the
 *          pin currently reads @p direction; NV_FALSE otherwise.
 */
NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 direction
)
{
    NvU32 irqGpioPin = 0;

    /* Only GPIO-type SOC interrupts are relevant here. */
    if (nv_get_current_irq_type(nv) != NV_SOC_IRQ_GPIO_TYPE)
    {
        return NV_FALSE;
    }

    nv_get_current_irq_priv_data(nv, &irqGpioPin);
    if (irqGpioPin != pinNum)
    {
        return NV_FALSE;
    }

#if defined(NV_GPIO_GET_VALUE_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
    /* The pin must currently sit at the level the caller is waiting for. */
    return (((NvU32)gpio_get_value(pinNum)) == direction) ? NV_TRUE : NV_FALSE;
#else
    nv_printf(NV_DBG_ERRORS, "gpio_get_value not present\n");
    return NV_FALSE;
#endif
}
/*!
 * @brief Register (and immediately disable) a GPIO interrupt for a pin.
 *
 * Maps the pin to its Linux IRQ number and registers a SOC IRQ handler
 * that triggers on both rising and falling edges. The IRQ is disabled
 * right after registration; the RM init sequence re-enables it.
 *
 * @param[in] nv             Per GPU Linux state
 * @param[in] pinNum         GPIO pin number
 * @param[in] trigger_level  Non-zero = register for both edges;
 *                           0 = skip (see comment below)
 *
 * @returns NV_OK on success or when registration is intentionally skipped;
 *          NV_ERR_GENERIC on failure or if gpio_to_irq is unavailable.
 */
NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt
(
    nv_state_t * nv,
    NvU32 pinNum,
    NvU32 trigger_level
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc;
    int irq_num;
#if defined(NV_GPIO_TO_IRQ_PRESENT) && NV_SUPPORTS_PLATFORM_DEVICE
    irq_num = gpio_to_irq(pinNum);
#else
    nv_printf(NV_DBG_ERRORS, "gpio_to_irq not present\n");
    return NV_ERR_GENERIC;
#endif
    /*
     * Ignore setting interrupt for falling trigger for hotplug gpio pin
     * as hotplug sequence calls this function twice to set the level
     * (rising/falling) of interrupt for same gpio pin. Linux interrupt
     * registration allows only once to register the interrupt with required
     * trigger levels. So to avoid re-registration, skip registering for
     * falling trigger level but when this function called with rising trigger
     * then itself register for both rising/falling triggers.
     */
    if (trigger_level == 0)
    {
        return NV_OK;
    }
    rc = nv_request_soc_irq(nvl, irq_num, NV_SOC_IRQ_GPIO_TYPE,
                            (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
                            IRQF_ONESHOT), pinNum,
                            "hdmi-hotplug");
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "IRQ registration failed for gpio - %d, rc - %d\n",
                  pinNum, rc);
        return NV_ERR_GENERIC;
    }
    /* Disable the irq after registration as RM init sequence re-enables it */
    disable_irq_nosync(irq_num);
    return NV_OK;
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2005-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2005-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,6 +27,7 @@
#include "os-interface.h"
#include "nv-linux.h"
#include "nvi2c.h"
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
@@ -286,7 +287,238 @@ void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
}
}
#else // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
/*!
 * @brief Register an i2c client device for a port/address pair and cache it
 *        in the per-device client table.
 *
 * The adapter reference taken with i2c_get_adapter() is dropped before
 * returning on every path; the created client holds its own reference.
 *
 * @param[in] nv              Per GPU Linux state
 * @param[in] linuxI2CSwPort  Linux i2c bus/port number
 * @param[in] address         7-bit client address
 *
 * @returns the registered client, or NULL on failure.
 */
static struct i2c_client * nv_i2c_register_client(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvU8 address)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct i2c_adapter *i2c_adapter;
    struct i2c_client *client;
    int c_index;
    struct i2c_board_info i2c_dev_info = {
        .type = "tegra_display",
        .addr = address,
    };

    /* Get the adapter using i2c port */
    i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
    if (i2c_adapter == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "Unable to get i2c adapter for port(%d)",
                  linuxI2CSwPort);
        return NULL;
    }

#if defined(NV_I2C_NEW_CLIENT_DEVICE_PRESENT)
    client = i2c_new_client_device(i2c_adapter, &i2c_dev_info);
#else
    nv_printf(NV_DBG_ERRORS, "nv_i2c_new_device not present\n");
    client = NULL;
#endif
    if (client == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "Unable to register client for address(0x%x)",
                  address);
        i2c_put_adapter(i2c_adapter);
        return NULL;
    }

    i2c_put_adapter(i2c_adapter);

    /* Save the Port and i2c client */
    nvl->i2c_clients[linuxI2CSwPort].port = linuxI2CSwPort;
    for (c_index = 0; c_index < MAX_CLIENTS_PER_ADAPTER; c_index++)
    {
        if (nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index] == NULL)
        {
            nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index] = client;
            break;
        }
    }

    /*
     * If every slot was taken, the client was not cached and would never be
     * released by nv_i2c_unregister_clients(); unregister it here instead
     * of leaking the device.
     */
    if (c_index == MAX_CLIENTS_PER_ADAPTER)
    {
        nv_printf(NV_DBG_ERRORS, "No free i2c client slot for port(%d)\n",
                  linuxI2CSwPort);
#if defined(NV_I2C_UNREGISTER_DEVICE_PRESENT)
        i2c_unregister_device(client);
#endif
        return NULL;
    }

    return client;
}
/*!
 * @brief Look up a previously registered i2c client for a port/address pair.
 *
 * @param[in] nv              Per GPU Linux state
 * @param[in] linuxI2CSwPort  Linux i2c bus/port number
 * @param[in] address         7-bit client address to match
 *
 * @returns the cached client, or NULL if none matches.
 */
static struct i2c_client *nv_i2c_get_registered_client(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvU8 address)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int slot;

    for (slot = 0; slot < MAX_CLIENTS_PER_ADAPTER; slot++)
    {
        struct i2c_client *candidate =
            (struct i2c_client *)nvl->i2c_clients[linuxI2CSwPort].pOsClient[slot];

        /* Slots are filled front-to-back: the first NULL ends the search. */
        if (candidate == NULL)
        {
            break;
        }

        if ((NvU8)candidate->addr == address)
        {
            return candidate;
        }
    }

    return NULL;
}
/*!
 * @brief Perform an i2c transfer on the given port.
 *
 * Converts the RM-style 8-bit addresses in @p nv_msgs to Linux 7-bit form
 * (mutating the caller's message array), lazily registers an i2c client per
 * destination address, then submits all messages in a single i2c_transfer()
 * call on the port's adapter.
 *
 * @param[in]     nv              Per GPU Linux state
 * @param[in]     linuxI2CSwPort  Linux i2c bus/port number
 * @param[in]     address         RM-style 8-bit client address (used for
 *                                error logging only)
 * @param[in,out] nv_msgs         Messages; addr fields are converted in place
 * @param[in]     num_msgs        Number of messages; must be > 0
 *
 * @returns NV_OK on success; NV_ERR_INVALID_ARGUMENT for a bad port or
 *          message count; NV_ERR_NO_MEMORY or NV_ERR_GENERIC on failure.
 */
NV_STATUS NV_API_CALL nv_i2c_transfer(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvU8 address,
    nv_i2c_msg_t *nv_msgs,
    int num_msgs
)
{
    struct i2c_client *client = NULL;
    struct i2c_msg *msgs;
    int count;
    int rc;
    NV_STATUS status = NV_OK;

    //
    // RM style client address is 8-bit addressing, but Linux uses 7-bit
    // addressing, so convert to 7-bit addressing format.
    //
    address = address >> 1;

    //
    // Check that this is a valid port. linuxI2CSwPort is unsigned, so only
    // the upper bound needs checking.
    //
    if (linuxI2CSwPort >= MAX_TEGRA_I2C_PORTS)
    {
        nv_printf(NV_DBG_ERRORS, "Invalid I2C port:%d\n", linuxI2CSwPort);
        return NV_ERR_INVALID_ARGUMENT;
    }

    //
    // Reject empty transfers up front: the adapter used below is taken from
    // the last message's client, so with no messages 'client' would never
    // be assigned before the i2c_transfer() call.
    //
    if (num_msgs <= 0)
    {
        nv_printf(NV_DBG_ERRORS, "Invalid i2c message count:%d\n", num_msgs);
        return NV_ERR_INVALID_ARGUMENT;
    }

    for (count = 0; count < num_msgs; count++) {
        //
        // RM style client address is 8-bit addressing, but Linux uses 7-bit
        // addressing, so convert to 7-bit addressing format.
        //
        nv_msgs[count].addr = nv_msgs[count].addr >> 1;

        client = nv_i2c_get_registered_client(nv, linuxI2CSwPort, nv_msgs[count].addr);
        if (client == NULL)
        {
            client = nv_i2c_register_client(nv, linuxI2CSwPort, nv_msgs[count].addr);
            if (client == NULL)
            {
                nv_printf(NV_DBG_ERRORS, "i2c client register failed for addr:0x%x\n",
                          nv_msgs[count].addr);
                return NV_ERR_GENERIC;
            }
        }
    }

    msgs = kzalloc((num_msgs * sizeof(*msgs)), GFP_KERNEL);
    if (msgs == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "i2c message allocation failed\n");
        return NV_ERR_NO_MEMORY;
    }

    for (count = 0; count < num_msgs; count++) {
        msgs[count].addr = nv_msgs[count].addr;
        msgs[count].flags = nv_msgs[count].flags;
        msgs[count].len = nv_msgs[count].len;
        msgs[count].buf = nv_msgs[count].buf;
    }

    /*
     * All clients on this port were created from the same adapter
     * (i2c_get_adapter(linuxI2CSwPort)), so any of them supplies it.
     */
    rc = i2c_transfer(client->adapter, msgs, num_msgs);
    if (rc != num_msgs)
    {
        nv_printf(NV_DBG_ERRORS, "i2c transfer failed for addr:0x%x",
                  address);
        status = NV_ERR_GENERIC;
    }

    kfree(msgs);

    return status;
}
/*!
 * @brief Unregister and forget every cached i2c client on every port.
 *
 * @param[in] nv  Per GPU Linux state
 */
void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int port;
    int slot;

    for (port = 0; port < MAX_TEGRA_I2C_PORTS; port++)
    {
        for (slot = 0; slot < MAX_CLIENTS_PER_ADAPTER; slot++)
        {
            struct i2c_client *client =
                (struct i2c_client *)nvl->i2c_clients[port].pOsClient[slot];

            if (client == NULL)
            {
                continue;
            }

#if defined(NV_I2C_UNREGISTER_DEVICE_PRESENT)
            i2c_unregister_device(client);
#else
            nv_printf(NV_DBG_ERRORS, "i2c_unregister_device not present\n");
#endif
            /* Clear the slot so lookups never see a stale pointer. */
            nvl->i2c_clients[port].pOsClient[slot] = NULL;
        }
    }
}
/*!
 * @brief Sample the SCL/SDA line states of an i2c bus.
 *
 * @param[in]  nv              Per GPU Linux state (unused)
 * @param[in]  linuxI2CSwPort  Linux i2c bus/port number
 * @param[out] scl             Receives the SCL line state
 * @param[out] sda             Receives the SDA line state
 *
 * @returns NV_OK on success; NV_ERR_INVALID_ARGUMENT for a bad port;
 *          NV_ERR_GENERIC on adapter lookup or query failure;
 *          NV_ERR_NOT_SUPPORTED when i2c_bus_status is not exported.
 */
NV_STATUS NV_API_CALL nv_i2c_bus_status(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvS32 *scl,
    NvS32 *sda)
{
#if NV_IS_EXPORT_SYMBOL_PRESENT_i2c_bus_status
    struct i2c_adapter *i2c_adapter;
    int ret;

    //
    // Check that this is a valid port. linuxI2CSwPort is unsigned, so only
    // the upper bound needs checking.
    //
    if (linuxI2CSwPort >= MAX_TEGRA_I2C_PORTS)
    {
        nv_printf(NV_DBG_ERRORS, "Invalid I2C port:%d\n", linuxI2CSwPort);
        return NV_ERR_INVALID_ARGUMENT;
    }

    /* Get the adapter using i2c port */
    i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
    if (i2c_adapter == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "Unable to get i2c adapter for port(%d)",
                  linuxI2CSwPort);
        // Bug fix: this function returns NV_STATUS, not a pointer, so the
        // previous 'return NULL' was a type error.
        return NV_ERR_GENERIC;
    }

    ret = i2c_bus_status(i2c_adapter, scl, sda);

    // Bug fix: drop the adapter reference on the failure path too; the
    // previous code leaked the reference when i2c_bus_status() failed.
    i2c_put_adapter(i2c_adapter);

    if (ret < 0)
    {
        nv_printf(NV_DBG_ERRORS, "i2c_bus_status failed:%d\n", ret);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
#endif // NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
#endif // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
#if !defined(CONFIG_I2C) && !defined(CONFIG_I2C_MODULE)
void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
{
@@ -296,5 +528,33 @@ void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
{
return NULL;
}
#endif
#endif // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
#if !NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
/*
 * Stub: platform display i2c support is compiled out
 * (!NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE); reports success without
 * transferring anything.
 */
NV_STATUS NV_API_CALL nv_i2c_transfer(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvU8 address,
    nv_i2c_msg_t *nv_msgs,
    int num_msgs
)
{
    return NV_OK;
}
/* Stub: nothing to unregister when platform display i2c is compiled out. */
void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
{
}
/*
 * Stub: bus-status sampling is unavailable when platform display i2c is
 * compiled out; always reports failure so callers do not trust *scl/*sda.
 */
NV_STATUS NV_API_CALL nv_i2c_bus_status(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvS32 *scl,
    NvS32 *sda)
{
    return NV_ERR_GENERIC;
}
#endif

View File

@@ -0,0 +1,28 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"

View File

@@ -0,0 +1,30 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "dce_rm_client_ipc.h"

View File

@@ -115,7 +115,10 @@ void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line)
nv_memdbg_node_t *node;
unsigned long flags;
WARN_ON(addr == NULL);
if (addr == NULL)
{
return;
}
/* If node allocation fails, we can still update the untracked counters */
node = kmalloc(sizeof(*node),
@@ -154,6 +157,11 @@ void nv_memdbg_remove(void *addr, NvU64 size, const char *file, int line)
nv_memdbg_node_t *node;
unsigned long flags;
if (addr == NULL)
{
return;
}
NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags);
node = nv_memdbg_remove_node(addr);

View File

@@ -161,16 +161,7 @@ nvidia_vma_access(
if (pageIndex >= at->num_pages)
return -EINVAL;
/*
* For PPC64LE build, nv_array_index_no_speculate() is not defined
* therefore call nv_speculation_barrier().
* When this definition is added, this platform check should be removed.
*/
#if !defined(NVCPU_PPC64LE)
pageIndex = nv_array_index_no_speculate(pageIndex, at->num_pages);
#else
nv_speculation_barrier();
#endif
kernel_mapping = (void *)(at->page_table[pageIndex]->virt_addr + pageOffset);
}
else
@@ -293,8 +284,7 @@ static vm_fault_t nvidia_fault(
NvU64 numPages = mmap_context->memArea.pRanges[idx].size >> PAGE_SHIFT;
while (numPages != 0)
{
ret = nv_insert_pfn(vma, curOffs + vma->vm_start, pfn,
mmap_context->remap_prot_extra);
ret = nv_insert_pfn(vma, curOffs + vma->vm_start, pfn);
if (ret != VM_FAULT_NOPAGE)
{
goto err;
@@ -407,7 +397,7 @@ static int nvidia_mmap_peer_io(
start = at->page_table[page_index]->phys_addr;
size = pages * PAGE_SIZE;
ret = nv_io_remap_page_range(vma, start, size, 0, vma->vm_start);
ret = nv_io_remap_page_range(vma, start, size, vma->vm_start);
return ret;
}
@@ -428,16 +418,7 @@ static int nvidia_mmap_sysmem(
start = vma->vm_start;
for (j = page_index; j < (page_index + pages); j++)
{
/*
* For PPC64LE build, nv_array_index_no_speculate() is not defined
* therefore call nv_speculation_barrier().
* When this definition is added, this platform check should be removed.
*/
#if !defined(NVCPU_PPC64LE)
j = nv_array_index_no_speculate(j, (page_index + pages));
#else
nv_speculation_barrier();
#endif
if (
#if defined(NV_VGPU_KVM_BUILD)
@@ -451,7 +432,7 @@ static int nvidia_mmap_sysmem(
else
{
if (at->flags.unencrypted)
vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot, 0);
vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot);
ret = vm_insert_page(vma, start,
NV_GET_PAGE_STRUCT(at->page_table[j]->phys_addr));
@@ -551,7 +532,6 @@ int nvidia_mmap_helper(
*/
if (!NV_IS_CTL_DEVICE(nv))
{
NvU32 remap_prot_extra = mmap_context->remap_prot_extra;
NvU64 access_start = mmap_context->access_start;
NvU64 access_len = mmap_context->access_size;
@@ -624,7 +604,7 @@ int nvidia_mmap_helper(
if (nv_io_remap_page_range(vma,
mmap_context->memArea.pRanges[idx].start,
mmap_context->memArea.pRanges[idx].size,
remap_prot_extra, vma->vm_start + curOffs) != 0)
vma->vm_start + curOffs) != 0)
{
up(&nvl->mmap_lock);
return -EAGAIN;

View File

@@ -30,11 +30,7 @@
#include "os-interface.h"
#include "nv-linux.h"
#if !defined(NVCPU_PPC64LE)
#define NV_NANO_TIMER_USE_HRTIMER 1
#else
#define NV_NANO_TIMER_USE_HRTIMER 0
#endif // !defined(NVCPU_PPC64LE)
struct nv_nano_timer
{

View File

@@ -1004,61 +1004,8 @@ int nvidia_p2p_get_rsync_registers(
nvidia_p2p_rsync_reg_info_t **reg_info
)
{
nv_linux_state_t *nvl;
NV_STATUS status;
NvU32 index = 0;
NvU32 count = 0;
nvidia_p2p_rsync_reg_info_t *info = NULL;
nvidia_p2p_rsync_reg_t *regs = NULL;
if (reg_info == NULL)
{
return -EINVAL;
}
status = os_alloc_mem((void**)&info, sizeof(*info));
if (status != NV_OK)
{
return -ENOMEM;
}
memset(info, 0, sizeof(*info));
info->version = NVIDIA_P2P_RSYNC_REG_INFO_VERSION;
LOCK_NV_LINUX_DEVICES();
for (nvl = nv_linux_devices; nvl; nvl = nvl->next)
{
count++;
}
status = os_alloc_mem((void**)&regs, (count * sizeof(*regs)));
if (status != NV_OK)
{
nvidia_p2p_put_rsync_registers(info);
UNLOCK_NV_LINUX_DEVICES();
return -ENOMEM;
}
// TODO: This function will always fail with -ENODEV because the logic that
// incremented 'index' was removed. It should be cleaned up in a future
// change.
UNLOCK_NV_LINUX_DEVICES();
info->regs = regs;
info->entries = index;
if (info->entries == 0)
{
nvidia_p2p_put_rsync_registers(info);
return -ENODEV;
}
*reg_info = info;
return 0;
// TODO: Remove this interface.
return -ENODEV;
}
NV_EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers);
@@ -1067,30 +1014,8 @@ void nvidia_p2p_put_rsync_registers(
nvidia_p2p_rsync_reg_info_t *reg_info
)
{
NvU32 i;
nvidia_p2p_rsync_reg_t *regs = NULL;
if (reg_info == NULL)
{
return;
}
if (reg_info->regs)
{
for (i = 0; i < reg_info->entries; i++)
{
regs = &reg_info->regs[i];
if (regs->ptr)
{
nv_iounmap(regs->ptr, regs->size);
}
}
os_free_mem(reg_info->regs);
}
os_free_mem(reg_info);
// TODO: Remove this interface. There is nothing to do because
// nvidia_p2p_get_rsync_registers always fails.
}
NV_EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers);

View File

@@ -453,35 +453,19 @@ typedef struct nvidia_p2p_rsync_reg_info {
/*
* @brief
* Gets rsync (GEN-ID) register information associated with the supported
* NPUs.
*
* The caller would use the returned information {GPU device, NPU device,
* socket-id, cluster-id} to pick the optimal generation registers to issue
* RSYNC (NVLink HW flush).
*
* The interface allocates structures to return the information, hence
* nvidia_p2p_put_rsync_registers() must be called to free the structures.
*
* Note, cluster-id is hardcoded to zero as early system configurations would
* only support cluster mode i.e. all devices would share the same cluster-id
* (0). In the future, appropriate kernel support would be needed to query
* cluster-ids.
*
* @param[out] reg_info
* A pointer to the rsync reg info structure.
* This interface is no longer supported and will always return an error. It
* is left in place (for now) to allow third-party callers to build without
* any errors.
*
* @Returns
* 0 Upon successful completion. Otherwise, returns negative value.
* -ENODEV
*/
int nvidia_p2p_get_rsync_registers(nvidia_p2p_rsync_reg_info_t **reg_info);
/*
* @brief
* Frees the structures allocated by nvidia_p2p_get_rsync_registers().
*
* @param[in] reg_info
* A pointer to the rsync reg info structure.
* This interface is no longer supported. It is left in place (for now) to
* allow third-party callers to build without any errors.
*/
void nvidia_p2p_put_rsync_registers(nvidia_p2p_rsync_reg_info_t *reg_info);

View File

@@ -337,7 +337,7 @@ static NvU32 find_gpu_numa_nodes_in_srat(nv_linux_state_t *nvl)
* supports.
*/
while (subtable_header_length &&
(((unsigned long)subtable_header) + subtable_header_length < table_end)) {
(((unsigned long)subtable_header) + subtable_header_length <= table_end)) {
if (subtable_header->type == ACPI_SRAT_TYPE_GENERIC_AFFINITY) {
NvU8 busAtByte2, busAtByte3;
@@ -532,12 +532,6 @@ nv_init_coherent_link_info
NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tNVRM: GPU memory NUMA node: %u\n", node);
}
#if NV_IS_EXPORT_SYMBOL_GPL_pci_ats_supported
nv->ats_support = pci_ats_supported(nvl->pci_dev);
#elif defined(NV_PCI_DEV_HAS_ATS_ENABLED)
nv->ats_support = nvl->pci_dev->ats_enabled;
#endif
if (NVreg_EnableUserNUMAManagement && !os_is_vgx_hyper())
{
NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE);
@@ -620,6 +614,19 @@ nv_pci_probe
}
#endif /* NV_PCI_SRIOV_SUPPORT */
if (!rm_wait_for_bar_firewall(
sp,
NV_PCI_DOMAIN_NUMBER(pci_dev),
NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev),
PCI_FUNC(pci_dev->devfn),
pci_dev->device))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: failed to wait for bar firewall to lower\n");
goto failed;
}
if (!rm_is_supported_pci_device(
(pci_dev->class >> 16) & 0xFF,
(pci_dev->class >> 8) & 0xFF,
@@ -658,63 +665,12 @@ nv_pci_probe
{
if (NV_PCI_RESOURCE_VALID(pci_dev, i))
{
#if defined(NV_PCI_MAX_MMIO_BITS_SUPPORTED)
if ((NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_MEM_TYPE_64) &&
((NV_PCI_RESOURCE_START(pci_dev, i) >> NV_PCI_MAX_MMIO_BITS_SUPPORTED)))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: This is a 64-bit BAR mapped above %dGB by the system\n"
"NVRM: BIOS or the %s kernel. This PCI I/O region assigned\n"
"NVRM: to your NVIDIA device is not supported by the kernel.\n"
"NVRM: BAR%d is %dM @ 0x%llx (PCI:%04x:%02x:%02x.%x)\n",
(1 << (NV_PCI_MAX_MMIO_BITS_SUPPORTED - 30)),
NV_KERNEL_NAME, i,
(NV_PCI_RESOURCE_SIZE(pci_dev, i) >> 20),
(NvU64)NV_PCI_RESOURCE_START(pci_dev, i),
NV_PCI_DOMAIN_NUMBER(pci_dev),
NV_PCI_BUS_NUMBER(pci_dev), NV_PCI_SLOT_NUMBER(pci_dev),
PCI_FUNC(pci_dev->devfn));
goto failed;
}
#endif
if ((NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_MEM_TYPE_64) &&
(NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_MEM_PREFETCH))
{
struct pci_dev *bridge = pci_dev->bus->self;
NvU32 base_upper, limit_upper;
last_bar_64bit = NV_TRUE;
if (bridge == NULL)
goto next_bar;
pci_read_config_dword(pci_dev, NVRM_PCICFG_BAR_OFFSET(i) + 4,
&base_upper);
if (base_upper == 0)
goto next_bar;
pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
&base_upper);
pci_read_config_dword(bridge, PCI_PREF_LIMIT_UPPER32,
&limit_upper);
if ((base_upper != 0) && (limit_upper != 0))
goto next_bar;
nv_printf(NV_DBG_ERRORS,
"NVRM: This is a 64-bit BAR mapped above 4GB by the system\n"
"NVRM: BIOS or the %s kernel, but the PCI bridge\n"
"NVRM: immediately upstream of this GPU does not define\n"
"NVRM: a matching prefetchable memory window.\n",
NV_KERNEL_NAME);
nv_printf(NV_DBG_ERRORS,
"NVRM: This may be due to a known Linux kernel bug. Please\n"
"NVRM: see the README section on 64-bit BARs for additional\n"
"NVRM: information.\n");
goto failed;
}
next_bar:
//
// If we are here, then we have found a valid BAR -- 32 or 64-bit.
//
@@ -831,25 +787,33 @@ next_bar:
NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_DISABLED);
nvl->numa_info.node_id = NUMA_NO_NODE;
#if NV_IS_EXPORT_SYMBOL_GPL_pci_ats_supported
nv->ats_support = pci_ats_supported(nvl->pci_dev);
#elif defined(NV_PCI_DEV_HAS_ATS_ENABLED)
nv->ats_support = nvl->pci_dev->ats_enabled;
#endif
if (pci_devid_is_self_hosted(pci_dev->device))
{
nv_init_coherent_link_info(nv);
}
#if defined(NVCPU_PPC64LE)
// Use HW NUMA support as a proxy for ATS support. This is true in the only
// PPC64LE platform where ATS is currently supported (IBM P9).
nv->ats_support = nv_platform_supports_numa(nvl);
#endif
if (nv->ats_support)
{
NV_DEV_PRINTF(NV_DBG_INFO, nv, "ATS supported by this GPU!\n");
}
nv_ats_supported |= nv->ats_support;
if (nv->pci_info.vendor_id == 0x10de && nv->pci_info.device_id == 0x2e2a) {
/* Disable advertising ATS support to UVM when GB20B is present */
nv_ats_supported = NV_FALSE;
}
nv_clk_get_handles(nv);
pci_set_master(pci_dev);
#if defined(CONFIG_VGA_ARB) && !defined(NVCPU_PPC64LE)
#if defined(CONFIG_VGA_ARB)
#if defined(VGA_DEFAULT_DEVICE)
#if defined(NV_VGA_TRYGET_PRESENT)
vga_tryget(VGA_DEFAULT_DEVICE, VGA_RSRC_LEGACY_MASK);
@@ -945,6 +909,12 @@ next_bar:
* after enabling dynamic power management.
*/
rm_enable_dynamic_power_management(sp, nv);
/*
* This must be the last action in nv_pci_probe(). Do not add code after this line.
*/
rm_notify_gpu_addition(sp, nv);
nv_kmem_cache_free_stack(sp);
return 0;
@@ -963,6 +933,7 @@ err_add_device:
err_zero_dev:
rm_free_private_state(sp, nv);
err_not_supported:
nv_clk_clear_handles(nv);
nv_ats_supported = prev_nv_ats_supported;
nv_lock_destroy_locks(sp, nv);
if (nvl != NULL)
@@ -1024,6 +995,9 @@ nv_pci_remove(struct pci_dev *pci_dev)
LOCK_NV_LINUX_DEVICES();
down(&nvl->ldata_lock);
nv->flags |= NV_FLAG_PCI_REMOVE_IN_PROGRESS;
rm_notify_gpu_removal(sp, nv);
/*
* Sanity check: A removed device shouldn't have a non-zero usage_count.
@@ -1083,6 +1057,8 @@ nv_pci_remove(struct pci_dev *pci_dev)
/* Remove proc entry for this GPU */
nv_procfs_remove_gpu(nvl);
nv_clk_clear_handles(nv);
rm_cleanup_dynamic_power_management(sp, nv);
nv->removed = NV_TRUE;

View File

@@ -0,0 +1,130 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*!
* @brief Unpowergate the display.
*
* Increment the device's usage counter, run pm_request_resume(dev)
* and return its result.
*
* For more details on runtime pm functions, please check the below
* files in the Linux kernel:
*
* include/linux/pm_runtime.h
* include/linux/pm.h
* or
* https://www.kernel.org/doc/Documentation/power/runtime_pm.txt
*
* pm_request_resume() submits a request to execute the subsystem-level
* resume callback for the device (the request is represented by a work
* item in pm_wq); returns 0 on success, 1 if the device's runtime PM
* status was already 'active', or error code if the request hasn't
* been queued up.
*
* @param[in] nv Per gpu linux state
*
* @returns NV_STATUS
*/
NV_STATUS NV_API_CALL nv_soc_pm_unpowergate(
    nv_state_t *nv)
{
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvS32 rc = pm_runtime_get(nvl->dev);

    if (rc == 1)
    {
        /* Runtime PM status was already 'active' — nothing to do. */
        nv_printf(NV_DBG_INFO, "NVRM: device was already unpowergated\n");
    }
    else if (rc == -EINPROGRESS)
    {
        /*
         * pm_runtime_get() internally calls __pm_runtime_resume(...RPM_ASYNC)
         * which internally calls rpm_resume() and this function will throw
         * "-EINPROGRESS" if it is being called when device state is
         * RPM_RESUMING and RPM_ASYNC or RPM_NOWAIT is set.
         */
        nv_printf(NV_DBG_INFO, "NVRM: device is already unpowergating\n");
    }
    else if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: unpowergate unsuccessful. ret: %d\n", rc);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
/*!
* @brief Powergate the display.
*
* Decrement the device's usage counter; if the result is 0 then run
* pm_request_idle(dev) and return its result.
*
* For more details on runtime pm functions, please check the below
* files in the Linux kernel:
*
* include/linux/pm_runtime.h
* include/linux/pm.h
* or
* https://www.kernel.org/doc/Documentation/power/runtime_pm.txt
*
* @param[in] nv Per gpu linux state
*
* @returns NV_STATUS
*/
NV_STATUS NV_API_CALL nv_soc_pm_powergate(
    nv_state_t *nv)
{
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvS32 rc = pm_runtime_put(nvl->dev);

    /* Only a clean zero return counts as a successful powergate. */
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: powergate unsuccessful. ret: %d\n", rc);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}

File diff suppressed because it is too large Load Diff

View File

@@ -24,9 +24,11 @@
#define __NO_VERSION__
#include "nv-linux.h"
#include "os-interface.h"
#include "nv-report-err.h"
#define CREATE_TRACE_POINTS
#include "nv-tracepoint.h"
nv_report_error_cb_t nv_error_cb_handle = NULL;
int nvidia_register_error_cb(nv_report_error_cb_t report_error_cb)
@@ -64,14 +66,15 @@ void nv_report_error(
char *buffer;
gfp_t gfp = NV_MAY_SLEEP() ? NV_GFP_NO_OOM : NV_GFP_ATOMIC;
if (nv_error_cb_handle == NULL)
return;
buffer = kvasprintf(gfp, format, ap);
if (buffer == NULL)
return;
nv_error_cb_handle(dev, error_number, buffer, strlen(buffer) + 1);
trace_nvidia_dev_xid(dev, error_number, buffer);
if (nv_error_cb_handle != NULL)
nv_error_cb_handle(dev, error_number, buffer, strlen(buffer) + 1);
kfree(buffer);
}

View File

@@ -0,0 +1,66 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "conftest.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM nvidia
#if !defined(_TRACE_NV_REPORT_ERR_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NV_REPORT_ERR_H
#include <linux/tracepoint.h>
TRACE_EVENT(nvidia_dev_xid,
TP_PROTO(const struct pci_dev *pdev, uint32_t error_code, const char *msg),
TP_ARGS(pdev, error_code, msg),
TP_STRUCT__entry(
__string(dev, pci_name(pdev))
__field (u32, error_code)
__string(msg, msg)
),
TP_fast_assign(
#if NV_ASSIGN_STR_ARGUMENT_COUNT == 1
__assign_str(dev);
__assign_str(msg);
#else
__assign_str(dev, pci_name(pdev));
__assign_str(msg, msg);
#endif
__entry->error_code = error_code;
),
TP_printk("Xid (PCI:%s): %u, %s", __get_str(dev), __entry->error_code, __get_str(msg))
);
#endif // !defined(_TRACE_NV_REPORT_ERR_H) || defined(TRACE_HEADER_MULTI_READ)
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE nv-tracepoint
#include <trace/define_trace.h>

View File

@@ -90,7 +90,6 @@ NV_STATUS NV_API_CALL nv_add_mapping_context_to_file(
}
nvamc->access_start = nvuap->access_start;
nvamc->access_size = nvuap->access_size;
nvamc->remap_prot_extra = nvuap->remap_prot_extra;
}
nvamc->prot = prot;
@@ -135,38 +134,22 @@ NV_STATUS NV_API_CALL nv_free_user_mapping(
}
/*
* This function adjust the {mmap,access}_{start,size} to reflect platform-specific
* mechanisms for isolating mappings at a finer granularity than the os_page_size
* This function checks if a user mapping should be allowed given the GPU's 4K
* page isolation requirements.
*/
NV_STATUS NV_API_CALL nv_get_usermap_access_params(
NV_STATUS NV_API_CALL nv_check_usermap_access_params(
nv_state_t *nv,
nv_usermap_access_params_t *nvuap
const nv_usermap_access_params_t *nvuap
)
{
NvU64 addr = nvuap->addr;
NvU64 size = nvuap->size;
const NvU64 addr = nvuap->addr;
const NvU64 size = nvuap->size;
nvuap->remap_prot_extra = 0;
/*
* Do verification and cache encoding based on the original
* (ostensibly smaller) mmap request, since accesses should be
* restricted to that range.
*/
if (rm_gpu_need_4k_page_isolation(nv) &&
NV_4K_PAGE_ISOLATION_REQUIRED(addr, size))
{
#if defined(NV_4K_PAGE_ISOLATION_PRESENT)
nvuap->remap_prot_extra = NV_PROT_4K_PAGE_ISOLATION;
nvuap->access_start = (NvU64)NV_4K_PAGE_ISOLATION_ACCESS_START(addr);
nvuap->access_size = NV_4K_PAGE_ISOLATION_ACCESS_LEN(addr, size);
nvuap->memArea.pRanges[0].start = (NvU64)NV_4K_PAGE_ISOLATION_MMAP_ADDR(addr);
nvuap->memArea.pRanges[0].size = NV_4K_PAGE_ISOLATION_MMAP_LEN(size);
#else
NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "4K page isolation required but not available!\n");
return NV_ERR_OPERATING_SYSTEM;
#endif
}
return NV_OK;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -294,7 +294,6 @@ static NV_STATUS nv_alloc_coherent_pages(
NvU32 i;
unsigned int gfp_mask;
unsigned long virt_addr = 0;
dma_addr_t bus_addr;
nv_linux_state_t *nvl;
struct device *dev;
@@ -312,7 +311,7 @@ static NV_STATUS nv_alloc_coherent_pages(
virt_addr = (unsigned long)dma_alloc_coherent(dev,
at->num_pages * PAGE_SIZE,
&bus_addr,
&at->dma_handle,
gfp_mask);
if (!virt_addr)
{
@@ -327,7 +326,6 @@ static NV_STATUS nv_alloc_coherent_pages(
page_ptr->virt_addr = virt_addr + i * PAGE_SIZE;
page_ptr->phys_addr = virt_to_phys((void *)page_ptr->virt_addr);
page_ptr->dma_addr = bus_addr + i * PAGE_SIZE;
}
if (at->cache_type != NV_MEMORY_CACHED)
@@ -358,7 +356,7 @@ static void nv_free_coherent_pages(
}
dma_free_coherent(dev, at->num_pages * PAGE_SIZE,
(void *)page_ptr->virt_addr, page_ptr->dma_addr);
(void *)page_ptr->virt_addr, at->dma_handle);
}
NV_STATUS nv_alloc_contig_pages(
@@ -372,13 +370,12 @@ NV_STATUS nv_alloc_contig_pages(
unsigned int gfp_mask;
unsigned long virt_addr = 0;
NvU64 phys_addr;
struct device *dev = at->dev;
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);
// TODO: This is a temporary WAR, and will be removed after fixing bug 200732409.
if (os_is_xen_dom0() || at->flags.unencrypted)
if (os_is_xen_dom0())
return nv_alloc_coherent_pages(nv, at);
at->order = get_order(at->num_pages * PAGE_SIZE);
@@ -396,6 +393,11 @@ NV_STATUS nv_alloc_contig_pages(
else
{
NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask);
// In CC, NV_GET_FREE_PAGES only allocates protected sysmem.
// To get unprotected sysmem, this memory is marked as unencrypted.
nv_set_memory_decrypted_zeroed(at->flags.unencrypted, virt_addr, 1 << at->order,
at->num_pages * PAGE_SIZE);
}
if (virt_addr == 0)
{
@@ -432,7 +434,6 @@ NV_STATUS nv_alloc_contig_pages(
page_ptr = at->page_table[i];
page_ptr->phys_addr = phys_addr;
page_ptr->virt_addr = virt_addr;
page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);
NV_MAYBE_RESERVE_PAGE(page_ptr);
}
@@ -456,6 +457,11 @@ failed:
}
page_ptr = at->page_table[0];
// For unprotected sysmem in CC, memory is marked as unencrypted during allocation.
// NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
nv_set_memory_encrypted(at->flags.unencrypted, page_ptr->virt_addr, 1 << at->order);
NV_FREE_PAGES(page_ptr->virt_addr, at->order);
return status;
@@ -490,6 +496,10 @@ void nv_free_contig_pages(
page_ptr = at->page_table[0];
// For unprotected sysmem in CC, memory is marked as unencrypted during allocation.
// NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
nv_set_memory_encrypted(at->flags.unencrypted, page_ptr->virt_addr, 1 << at->order);
NV_FREE_PAGES(page_ptr->virt_addr, at->order);
}
@@ -504,8 +514,6 @@ NV_STATUS nv_alloc_system_pages(
unsigned int gfp_mask;
unsigned long virt_addr = 0;
NvU64 phys_addr;
struct device *dev = at->dev;
dma_addr_t bus_addr;
unsigned int alloc_page_size = PAGE_SIZE << at->order;
unsigned int alloc_num_pages = NV_CEIL(at->num_pages * PAGE_SIZE, alloc_page_size);
@@ -521,15 +529,7 @@ NV_STATUS nv_alloc_system_pages(
for (i = 0; i < alloc_num_pages; i++)
{
if (at->flags.unencrypted && (dev != NULL))
{
virt_addr = (unsigned long)dma_alloc_coherent(dev,
alloc_page_size,
&bus_addr,
gfp_mask);
at->flags.coherent = NV_TRUE;
}
else if (at->flags.node)
if (at->flags.node)
{
unsigned long ptr = 0ULL;
NV_ALLOC_PAGES_NODE(ptr, at->node_id, at->order, gfp_mask);
@@ -545,6 +545,11 @@ NV_STATUS nv_alloc_system_pages(
else
{
NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask);
// In CC, NV_GET_FREE_PAGES only allocates protected sysmem.
// To get unprotected sysmem, this memory is marked as unencrypted.
nv_set_memory_decrypted_zeroed(at->flags.unencrypted, virt_addr, 1 << at->order,
alloc_page_size);
}
if (virt_addr == 0)
@@ -563,6 +568,11 @@ NV_STATUS nv_alloc_system_pages(
for (sub_page_idx = 0; sub_page_idx < os_pages_in_page; sub_page_idx++)
{
unsigned long sub_page_virt_addr = virt_addr + sub_page_offset;
unsigned int base_page_idx = (i * os_pages_in_page) + sub_page_idx;
if (base_page_idx >= at->num_pages)
break;
phys_addr = nv_get_kern_phys_address(sub_page_virt_addr);
if (phys_addr == 0)
{
@@ -586,21 +596,10 @@ NV_STATUS nv_alloc_system_pages(
}
#endif
page_ptr = at->page_table[(i * os_pages_in_page) + sub_page_idx];
page_ptr = at->page_table[base_page_idx];
page_ptr->phys_addr = phys_addr;
page_ptr->virt_addr = sub_page_virt_addr;
//
// Use unencrypted dma_addr returned by dma_alloc_coherent() as
// nv_phys_to_dma() returns encrypted dma_addr when AMD SEV is enabled.
//
if (at->flags.coherent)
page_ptr->dma_addr = bus_addr;
else if (dev != NULL)
page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);
else
page_ptr->dma_addr = page_ptr->phys_addr;
NV_MAYBE_RESERVE_PAGE(page_ptr);
sub_page_offset += PAGE_SIZE;
}
@@ -618,15 +617,12 @@ failed:
{
page_ptr = at->page_table[j * os_pages_in_page];
NV_MAYBE_UNRESERVE_PAGE(page_ptr);
if (at->flags.coherent)
{
dma_free_coherent(dev, alloc_page_size, (void *)page_ptr->virt_addr,
page_ptr->dma_addr);
}
else
{
NV_FREE_PAGES(page_ptr->virt_addr, at->order);
}
// For unprotected sysmem in CC, memory is marked as unencrypted during allocation.
// NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
nv_set_memory_encrypted(at->flags.unencrypted, page_ptr->virt_addr, 1 << at->order);
NV_FREE_PAGES(page_ptr->virt_addr, at->order);
}
}
@@ -639,7 +635,6 @@ void nv_free_system_pages(
{
nvidia_pte_t *page_ptr;
unsigned int i;
struct device *dev = at->dev;
unsigned int alloc_page_size = PAGE_SIZE << at->order;
unsigned int os_pages_in_page = alloc_page_size / PAGE_SIZE;
@@ -661,18 +656,47 @@ void nv_free_system_pages(
{
page_ptr = at->page_table[i];
if (at->flags.coherent)
{
dma_free_coherent(dev, alloc_page_size, (void *)page_ptr->virt_addr,
page_ptr->dma_addr);
}
else
{
NV_FREE_PAGES(page_ptr->virt_addr, at->order);
}
// For unprotected sysmem in CC, memory is marked as unencrypted during allocation.
// NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
nv_set_memory_encrypted(at->flags.unencrypted, page_ptr->virt_addr, 1 << at->order);
NV_FREE_PAGES(page_ptr->virt_addr, at->order);
}
}
/*
 * Map an array of struct page pointers into a contiguous kernel virtual
 * range via vmap().  'cached' selects a cached vs. uncached mapping and
 * 'unencrypted' requests an unencrypted (SME/SEV) protection where the
 * platform supports it.  Returns the kernel VA, or 0 on failure.
 */
static NvUPtr nv_vmap(struct page **pages, NvU32 page_count,
                      NvBool cached, NvBool unencrypted)
{
    void *ptr;
    pgprot_t prot = PAGE_KERNEL;
#if defined(NVCPU_X86_64)
#if defined(PAGE_KERNEL_NOENC)
    if (unencrypted)
    {
        /* Strip the memory-encryption attribute from the mapping. */
        prot = cached ? nv_adjust_pgprot(PAGE_KERNEL_NOENC) :
                        nv_adjust_pgprot(NV_PAGE_KERNEL_NOCACHE_NOENC);
    }
    else
#endif
    {
        prot = cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE;
    }
#elif defined(NVCPU_AARCH64)
    prot = cached ? PAGE_KERNEL : NV_PGPROT_UNCACHED(PAGE_KERNEL);
#endif
    /* All memory cached in PPC64LE; can't honor 'cached' input. */
    ptr = vmap(pages, page_count, VM_MAP, prot);

    /*
     * Fix: only record the mapping in the memory-debug tracker when vmap()
     * actually succeeded; previously a NULL pointer was registered on
     * failure, skewing the NV_MEMDBG accounting.
     */
    if (ptr != NULL)
    {
        NV_MEMDBG_ADD(ptr, page_count * PAGE_SIZE);
    }
    return (NvUPtr)ptr;
}
/*
 * Tear down a kernel virtual mapping created by nv_vmap() and drop the
 * corresponding memory-debug tracking entry.
 */
static void nv_vunmap(NvUPtr vaddr, NvU32 page_count)
{
    void *addr = (void *)vaddr;

    vunmap(addr);
    NV_MEMDBG_REMOVE(addr, page_count * PAGE_SIZE);
}
NvUPtr nv_vm_map_pages(
struct page **pages,
NvU32 count,

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -278,6 +278,67 @@ struct dev_pm_ops nv_pm_ops = {
*** STATIC functions
***/
#if defined(NVCPU_X86_64)
#define NV_AMD_SME_BIT BIT(0)
/*
 * Query CPUID to determine whether the processor advertises AMD Secure
 * Memory Encryption (SME) support: extended leaf 0x8000001f must exist
 * and its EAX must have the SME bit (bit 0) set.
 */
static
NvBool nv_is_sme_supported(
    void
)
{
    unsigned int eax, ebx, ecx, edx;

    /* Verify the SME/SEV feature leaf exists at all. */
    eax = 0x80000000;
    ecx = 0;
    native_cpuid(&eax, &ebx, &ecx, &edx);
    if (eax < 0x8000001f)
    {
        return NV_FALSE;
    }

    /* Leaf 0x8000001f: EAX bit 0 advertises SME. */
    eax = 0x8000001f;
    ecx = 0;
    native_cpuid(&eax, &ebx, &ecx, &edx);

    return (eax & NV_AMD_SME_BIT) ? NV_TRUE : NV_FALSE;
}
#endif
/*
 * Detect whether AMD SME is actually enabled by reading the memory
 * encryption bit in the AMD SYSCFG MSR.  Used as a fallback when the
 * kernel does not expose cc_platform_has().  Returns NV_FALSE on
 * non-x86_64 builds or when the required MSR macros are unavailable.
 */
static
NvBool nv_detect_sme_enabled(
    void
)
{
#if (defined(MSR_K8_SYSCFG) || defined(MSR_AMD64_SYSCFG)) && defined(NVCPU_X86_64)
    NvU32 lo_val, hi_val;

    if (!nv_is_sme_supported())
    {
        return NV_FALSE;
    }

#if defined(MSR_AMD64_SYSCFG)
    rdmsr(MSR_AMD64_SYSCFG, lo_val, hi_val);
#if defined(MSR_AMD64_SYSCFG_MEM_ENCRYPT)
    return (lo_val & MSR_AMD64_SYSCFG_MEM_ENCRYPT) ? NV_TRUE : NV_FALSE;
#endif //defined(MSR_AMD64_SYSCFG_MEM_ENCRYPT)
#elif defined(MSR_K8_SYSCFG)
    rdmsr(MSR_K8_SYSCFG, lo_val, hi_val);
#if defined(MSR_K8_SYSCFG_MEM_ENCRYPT)
    return (lo_val & MSR_K8_SYSCFG_MEM_ENCRYPT) ? NV_TRUE : NV_FALSE;
#endif //defined(MSR_K8_SYSCFG_MEM_ENCRYPT)
#endif //defined(MSR_AMD64_SYSCFG)

    /*
     * Fix: the original fell off the end of a non-void function (undefined
     * behavior) when the SYSCFG MSR macro exists but the matching
     * *_MEM_ENCRYPT bit macro does not.  Conservatively report SME as
     * disabled in that configuration.
     */
    return NV_FALSE;
#else
    return NV_FALSE;
#endif //(defined(MSR_K8_SYSCFG) || defined(MSR_AMD64_SYSCFG)) && defined(NVCPU_X86_64)
}
static
void nv_detect_conf_compute_platform(
void
@@ -290,6 +351,8 @@ void nv_detect_conf_compute_platform(
os_cc_sev_snp_enabled = cc_platform_has(CC_ATTR_GUEST_SEV_SNP);
#endif
os_cc_sme_enabled = cc_platform_has(CC_ATTR_MEM_ENCRYPT);
#if defined(NV_HV_GET_ISOLATION_TYPE) && IS_ENABLED(CONFIG_HYPERV) && defined(NVCPU_X86_64)
if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP)
{
@@ -306,6 +369,7 @@ void nv_detect_conf_compute_platform(
#else
os_cc_enabled = NV_FALSE;
os_cc_sev_snp_enabled = NV_FALSE;
os_cc_sme_enabled = nv_detect_sme_enabled();
os_cc_snp_vtom_enabled = NV_FALSE;
os_cc_tdx_enabled = NV_FALSE;
#endif //NV_CC_PLATFORM_PRESENT
@@ -630,6 +694,9 @@ nv_report_applied_patches(void)
static void
nv_drivers_exit(void)
{
#if NV_SUPPORTS_PLATFORM_DEVICE
nv_platform_unregister_driver();
#endif
nv_pci_unregister_driver();
}
@@ -646,6 +713,16 @@ nv_drivers_init(void)
goto exit;
}
#if NV_SUPPORTS_PLATFORM_DEVICE
rc = nv_platform_register_driver();
if (rc < 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: SOC driver registration failed!\n");
nv_pci_unregister_driver();
rc = -ENODEV;
}
#endif
exit:
return rc;
}
@@ -1561,6 +1638,11 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
int rc;
NV_STATUS status;
if ((nv->flags & NV_FLAG_PCI_REMOVE_IN_PROGRESS) != 0)
{
return -ENODEV;
}
if ((nv->flags & NV_FLAG_EXCLUDE) != 0)
{
char *uuid = rm_get_gpu_uuid(sp, nv);
@@ -3197,15 +3279,13 @@ nv_alias_pages(
if (contiguous && i>0)
{
page_ptr->dma_addr = pte_array[0] + (i << PAGE_SHIFT);
page_ptr->phys_addr = pte_array[0] + (i << PAGE_SHIFT);
}
else
{
page_ptr->dma_addr = pte_array[i];
page_ptr->phys_addr = pte_array[i];
}
page_ptr->phys_addr = page_ptr->dma_addr;
/* aliased pages will be mapped on demand. */
page_ptr->virt_addr = 0x0;
}
@@ -3291,7 +3371,8 @@ NV_STATUS NV_API_CALL nv_register_user_pages(
NvU64 page_count,
NvU64 *phys_addr,
void *import_priv,
void **priv_data
void **priv_data,
NvBool unencrypted
)
{
nv_alloc_t *at;
@@ -3322,6 +3403,9 @@ NV_STATUS NV_API_CALL nv_register_user_pages(
at->flags.user = NV_TRUE;
if (unencrypted)
at->flags.unencrypted = NV_TRUE;
at->order = get_order(at->num_pages * PAGE_SIZE);
for (i = 0; i < page_count; i++)
@@ -3394,7 +3478,6 @@ NV_STATUS NV_API_CALL nv_register_phys_pages(
nv_alloc_t *at;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NvU64 i;
NvU64 addr;
at = nvos_create_alloc(nvl->dev, page_count);
@@ -3413,9 +3496,9 @@ NV_STATUS NV_API_CALL nv_register_phys_pages(
at->order = get_order(at->num_pages * PAGE_SIZE);
for (i = 0, addr = phys_addr[0]; i < page_count; addr = phys_addr[++i])
for (i = 0; i < page_count; i++)
{
at->page_table[i]->phys_addr = addr;
at->page_table[i]->phys_addr = phys_addr[i];
}
at->user_pages = NULL;

View File

@@ -173,7 +173,8 @@ NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEc
NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace,
NvU64 physAddress, unsigned numEntries,
NvBool bVidMemAperture, NvU32 pasid);
NvBool bVidMemAperture, NvU32 pasid,
NvU64 *dmaAdress);
NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace);

View File

@@ -208,7 +208,6 @@ NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session,
memset(platformInfo, 0, sizeof(*platformInfo));
platformInfo->atsSupported = nv_ats_supported;
platformInfo->confComputingEnabled = os_cc_enabled;
status = rm_gpu_ops_create_session(sp, (gpuSessionHandle *)session);
@@ -692,7 +691,8 @@ EXPORT_SYMBOL(nvUvmInterfaceServiceDeviceInterruptsRM);
NV_STATUS nvUvmInterfaceSetPageDirectory(uvmGpuAddressSpaceHandle vaSpace,
NvU64 physAddress, unsigned numEntries,
NvBool bVidMemAperture, NvU32 pasid)
NvBool bVidMemAperture, NvU32 pasid,
NvU64 *dmaAddress)
{
nvidia_stack_t *sp = NULL;
NV_STATUS status;
@@ -703,7 +703,8 @@ NV_STATUS nvUvmInterfaceSetPageDirectory(uvmGpuAddressSpaceHandle vaSpace,
}
status = rm_gpu_ops_set_page_directory(sp, (gpuAddressSpaceHandle)vaSpace,
physAddress, numEntries, bVidMemAperture, pasid);
physAddress, numEntries, bVidMemAperture, pasid,
dmaAddress);
nv_kmem_cache_free_stack(sp);
return status;
@@ -894,6 +895,7 @@ NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device,
goto error;
}
}
goto done;
error:

View File

@@ -1,6 +1,14 @@
NVIDIA_SOURCES ?=
NVIDIA_SOURCES_CXX ?=
NVIDIA_SOURCES += nvidia/nv-platform.c
NVIDIA_SOURCES += nvidia/nv-dsi-parse-panel-props.c
NVIDIA_SOURCES += nvidia/nv-bpmp.c
NVIDIA_SOURCES += nvidia/nv-gpio.c
NVIDIA_SOURCES += nvidia/nv-backlight.c
NVIDIA_SOURCES += nvidia/nv-imp.c
NVIDIA_SOURCES += nvidia/nv-platform-pm.c
NVIDIA_SOURCES += nvidia/nv-ipc-soc.c
NVIDIA_SOURCES += nvidia/nv.c
NVIDIA_SOURCES += nvidia/nv-pci.c
NVIDIA_SOURCES += nvidia/nv-dmabuf.c
@@ -30,6 +38,7 @@ NVIDIA_SOURCES += nvidia/nv-rsync.c
NVIDIA_SOURCES += nvidia/nv-msi.c
NVIDIA_SOURCES += nvidia/nv-caps.c
NVIDIA_SOURCES += nvidia/nv-caps-imex.c
NVIDIA_SOURCES += nvidia/nv-clk.c
NVIDIA_SOURCES += nvidia/nv-host1x.c
NVIDIA_SOURCES += nvidia/nv_uvm_interface.c
NVIDIA_SOURCES += nvidia/libspdm_aead.c

View File

@@ -109,7 +109,6 @@ $(obj)/$(NVIDIA_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_OBJECTS))
NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_OBJECTS)
NV_CONFTEST_FUNCTION_COMPILE_TESTS += hash__remap_4k_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_uc
@@ -196,6 +195,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += mdev_set_iommu_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += offline_and_remove_memory
NV_CONFTEST_FUNCTION_COMPILE_TESTS += stack_trace
NV_CONFTEST_FUNCTION_COMPILE_TESTS += crypto_tfm_ctx_aligned
NV_CONFTEST_FUNCTION_COMPILE_TESTS += assign_str
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active
@@ -234,6 +234,10 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += follow_pte_arg_vma
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pfnmap_start
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_pci_ats_supported
NV_CONFTEST_SYMBOL_COMPILE_TESTS += ecc_digits_from_bytes
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_set_memory_encrypted
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_set_memory_decrypted
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl___platform_driver_register
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___platform_driver_register
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops
@@ -258,6 +262,8 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += memory_failure_has_trapno_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += foll_longterm_present
NV_CONFTEST_TYPE_COMPILE_TESTS += bus_type_has_iommu_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += of_property_for_each_u32_has_internal_args
NV_CONFTEST_TYPE_COMPILE_TESTS += platform_driver_struct_remove_returns_void
NV_CONFTEST_TYPE_COMPILE_TESTS += class_create_has_no_owner_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += class_devnode_has_const_arg

View File

@@ -0,0 +1,62 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM nvswitch
#if !defined(_TRACE_NVSWITCH_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NVSWITCH_EVENT_H
#include <linux/tracepoint.h>
/*
 * Tracepoint fired when the driver reports an SXid (NVSwitch) error.
 * Captures the PCI device name, the numeric SXid error code, and the
 * pre-formatted message string supplied by the caller.
 */
TRACE_EVENT(nvswitch_dev_sxid,
TP_PROTO(const struct pci_dev *pdev, uint32_t error_code, const char *msg),
TP_ARGS(pdev, error_code, msg),
TP_STRUCT__entry(
__string(dev, pci_name(pdev))
__field (u32, error_code)
__string(msg, msg)
),
TP_fast_assign(
/* Newer kernels' __assign_str() takes only the field name; the
 * argument count is detected by conftest (NV_ASSIGN_STR_ARGUMENT_COUNT). */
#if NV_ASSIGN_STR_ARGUMENT_COUNT == 1
__assign_str(dev);
__assign_str(msg);
#else
__assign_str(dev, pci_name(pdev));
__assign_str(msg, msg);
#endif
__entry->error_code = error_code;
),
TP_printk("SXid (PCI:%s): %u, %s", __get_str(dev), __entry->error_code, __get_str(msg))
);
#endif
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE nvswitch_event
#include <trace/define_trace.h>

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,6 +27,8 @@
#include "nv-linux.h"
#include "nv-caps-imex.h"
#include "nv-platform.h"
#include "nv-time.h"
#include <linux/mmzone.h>
@@ -56,6 +58,7 @@ NvBool os_cc_enabled = 0;
NvBool os_cc_sev_snp_enabled = 0;
NvBool os_cc_snp_vtom_enabled = 0;
NvBool os_cc_tdx_enabled = 0;
NvBool os_cc_sme_enabled = 0;
#if defined(CONFIG_DMA_SHARED_BUFFER)
NvBool os_dma_buf_enabled = NV_TRUE;
@@ -2119,6 +2122,31 @@ void NV_API_CALL os_wake_up
complete_all(&wq->q);
}
/*
 * Report which Tegra platform flavor the driver is running on (FPGA,
 * simulator, or silicon) via *mode.  Returns NV_OK on success, or
 * NV_ERR_NOT_SUPPORTED when the Tegra fuse helpers are unavailable in
 * this build configuration.
 */
NV_STATUS NV_API_CALL os_get_tegra_platform
(
    NvU32 *mode
)
{
#if defined(NV_SOC_TEGRA_FUSE_HELPER_H_PRESENT) && NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
    /* Default to silicon; override for pre-silicon environments. */
    NvU32 platform = NV_OS_TEGRA_PLATFORM_SILICON;

    if (tegra_platform_is_fpga())
    {
        platform = NV_OS_TEGRA_PLATFORM_FPGA;
    }
    else if (tegra_platform_is_vdk())
    {
        platform = NV_OS_TEGRA_PLATFORM_SIM;
    }

    *mode = platform;
    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
nv_cap_t* NV_API_CALL os_nv_cap_init
(
const char *path