mirror of
https://github.com/NVIDIA/open-gpu-kernel-modules.git
synced 2026-02-09 09:39:57 +00:00
515.43.04
This commit is contained in:
983
kernel-open/nvidia/export_nvswitch.h
Normal file
983
kernel-open/nvidia/export_nvswitch.h
Normal file
@@ -0,0 +1,983 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NVSWITCH_EXPORT_H_
|
||||
#define _NVSWITCH_EXPORT_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "nv_stdarg.h"
|
||||
#include "nvlink_common.h"
|
||||
#include "ioctl_common_nvswitch.h"
|
||||
|
||||
#define NVSWITCH_DRIVER_NAME "nvidia-nvswitch"
|
||||
|
||||
#define NVSWITCH_MAX_BARS 1
|
||||
|
||||
#define NVSWITCH_DEVICE_INSTANCE_MAX 64
|
||||
|
||||
#define PCI_CLASS_BRIDGE_NVSWITCH 0x0680
|
||||
|
||||
#ifndef PCI_VENDOR_ID_NVIDIA
|
||||
#define PCI_VENDOR_ID_NVIDIA 0x10DE
|
||||
#endif
|
||||
|
||||
#define PCI_ADDR_OFFSET_VENDOR 0
|
||||
#define PCI_ADDR_OFFSET_DEVID 2
|
||||
|
||||
#define NVSWITCH_NSEC_PER_SEC 1000000000ULL
|
||||
|
||||
#define NVSWITCH_DBG_LEVEL_MMIO 0x0
|
||||
#define NVSWITCH_DBG_LEVEL_INFO 0x1
|
||||
#define NVSWITCH_DBG_LEVEL_SETUP 0x2
|
||||
#define NVSWITCH_DBG_LEVEL_WARN 0x3
|
||||
#define NVSWITCH_DBG_LEVEL_ERROR 0x4
|
||||
|
||||
#define NVSWITCH_LOG_BUFFER_SIZE 512
|
||||
|
||||
#define NVSWITCH_DMA_DIR_TO_SYSMEM 0
|
||||
#define NVSWITCH_DMA_DIR_FROM_SYSMEM 1
|
||||
#define NVSWITCH_DMA_DIR_BIDIRECTIONAL 2
|
||||
|
||||
#define NVSWITCH_I2C_CMD_READ 0
|
||||
#define NVSWITCH_I2C_CMD_WRITE 1
|
||||
#define NVSWITCH_I2C_CMD_SMBUS_READ 2
|
||||
#define NVSWITCH_I2C_CMD_SMBUS_WRITE 3
|
||||
#define NVSWITCH_I2C_CMD_SMBUS_QUICK_READ 4
|
||||
#define NVSWITCH_I2C_CMD_SMBUS_QUICK_WRITE 5
|
||||
|
||||
typedef struct nvswitch_device nvswitch_device;
|
||||
typedef struct NVSWITCH_CLIENT_EVENT NVSWITCH_CLIENT_EVENT;
|
||||
|
||||
/*
|
||||
* @Brief : The interface will check if the client's version is supported by the
|
||||
* driver.
|
||||
*
|
||||
* @param[in] user_version Version of the interface that the client is
|
||||
* compiled with.
|
||||
* @param[out] kernel_version Version of the interface that the kernel driver
|
||||
* is compiled with. This information will be
|
||||
* filled even if the CTRL call returns
|
||||
* -NVL_ERR_NOT_SUPPORTED due to version mismatch.
|
||||
* @param[in] length Version string buffer length
|
||||
*
|
||||
* @returns NVL_SUCCESS if the client is using compatible
|
||||
* interface.
|
||||
* -NVL_ERR_NOT_SUPPORTED if the client is using
|
||||
* incompatible interface.
|
||||
* Or, Other NVL_XXX status value.
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_check_api_version
|
||||
(
|
||||
const char *user_version,
|
||||
char *kernel_version,
|
||||
NvU32 length
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Allocate a new nvswitch lib device instance.
|
||||
*
|
||||
* @Description : Creates and registers a new nvswitch device and registers
|
||||
* with the nvlink library. This only initializes software state,
|
||||
* it does not initialize the hardware state.
|
||||
*
|
||||
* @param[in] pci_domain pci domain of the device
|
||||
* @param[in] pci_bus pci bus of the device
|
||||
* @param[in] pci_device pci device of the device
|
||||
* @param[in] pci_func pci function of the device
|
||||
* @param[in] device_id pci device ID of the device
|
||||
* @param[in] os_handle Device handle used to interact with OS layer
|
||||
* @param[in] os_instance instance number of this device
|
||||
* @param[out] device return device handle for interfacing with library
|
||||
*
|
||||
* @returns NVL_SUCCESS if the action succeeded
|
||||
* an NVL error code otherwise
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_register_device
|
||||
(
|
||||
NvU16 pci_domain,
|
||||
NvU8 pci_bus,
|
||||
NvU8 pci_device,
|
||||
NvU8 pci_func,
|
||||
NvU16 device_id,
|
||||
void *os_handle,
|
||||
NvU32 os_instance,
|
||||
nvswitch_device **device
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Clean-up the software state for a nvswitch device.
|
||||
*
|
||||
* @Description :
|
||||
*
|
||||
* @param[in] device device handle to destroy
|
||||
*
|
||||
* @returns none
|
||||
*/
|
||||
void
|
||||
nvswitch_lib_unregister_device
|
||||
(
|
||||
nvswitch_device *device
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Initialize the hardware for a nvswitch device.
|
||||
*
|
||||
* @Description :
|
||||
*
|
||||
* @param[in] device a reference to the device to initialize
|
||||
*
|
||||
* @returns NVL_SUCCESS if the action succeeded
|
||||
* -NVL_BAD_ARGS if bad arguments provided
|
||||
* -NVL_PCI_ERROR if bar info unable to be retrieved
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_initialize_device
|
||||
(
|
||||
nvswitch_device *device
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Shutdown the hardware for a nvswitch device.
|
||||
*
|
||||
* @Description :
|
||||
*
|
||||
* @param[in] device a reference to the device to initialize
|
||||
*
|
||||
* @returns NVL_SUCCESS if the action succeeded
|
||||
* -NVL_BAD_ARGS if bad arguments provided
|
||||
* -NVL_PCI_ERROR if bar info unable to be retrieved
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_shutdown_device
|
||||
(
|
||||
nvswitch_device *device
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief Control call (ioctl) interface.
|
||||
*
|
||||
* @param[in] device device to operate on
|
||||
* @param[in] cmd Enumerated command to execute.
|
||||
* @param[in] params Params structure to pass to the command.
|
||||
* @param[in] params_size Size of the parameter structure.
|
||||
* @param[in] osPrivate The private data structure for OS.
|
||||
*
|
||||
* @return NVL_SUCCESS on a successful command
|
||||
* -NVL_NOT_FOUND if target device unable to be found
|
||||
* -NVL_BAD_ARGS if an invalid cmd is provided
|
||||
* -NVL_BAD_ARGS if a null arg is provided
|
||||
* -NVL_ERR_GENERIC otherwise
|
||||
*/
|
||||
NvlStatus nvswitch_lib_ctrl
|
||||
(
|
||||
nvswitch_device *device,
|
||||
NvU32 cmd,
|
||||
void *params,
|
||||
NvU64 size,
|
||||
void *osPrivate
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief: Retrieve PCI information for a switch based from device instance
|
||||
*
|
||||
* @Description :
|
||||
*
|
||||
* @param[in] lib_handle device to query
|
||||
* @param[out] pciInfo return pointer to nvswitch lib copy of device info
|
||||
*/
|
||||
void nvswitch_lib_get_device_info
|
||||
(
|
||||
nvswitch_device *lib_handle,
|
||||
struct nvlink_pci_info **pciInfo
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief: Retrieve BIOS version for an nvswitch device
|
||||
*
|
||||
* @Description: For devices with a BIOS, this retrieves the BIOS version.
|
||||
*
|
||||
* @param[in] device device to query
|
||||
* @param[out] version BIOS version is stored here
|
||||
*
|
||||
* @returns NVL_SUCCESS BIOS version was retrieved successfully
|
||||
* -NVL_BAD_ARGS an invalid device is provided
|
||||
* -NVL_ERR_INVALID_STATE an error occurred reading BIOS info
|
||||
* -NVL_ERR_NOT_SUPPORTED device doesn't support this feature
|
||||
*/
|
||||
|
||||
NvlStatus
|
||||
nvswitch_lib_get_bios_version
|
||||
(
|
||||
nvswitch_device *device,
|
||||
NvU64 *version
|
||||
);
|
||||
|
||||
|
||||
/*
|
||||
* @Brief: Retrieve whether the device supports PCI pin interrupts
|
||||
*
|
||||
* @Description: Returns whether the device can use PCI pin IRQs
|
||||
*
|
||||
*
|
||||
* @returns NV_TRUE device can use PCI pin IRQs
*          NV_FALSE device cannot use PCI pin IRQs
*          NOTE(review): declared as NvlStatus rather than NvBool —
*          confirm the intended return convention.
|
||||
*/
|
||||
|
||||
NvlStatus
|
||||
nvswitch_lib_use_pin_irq
|
||||
(
|
||||
nvswitch_device *device
|
||||
);
|
||||
|
||||
|
||||
/*
|
||||
* @Brief: Load platform information (emulation, simulation etc.).
|
||||
*
|
||||
* @param[in] lib_handle device
|
||||
*
|
||||
* @return NVL_SUCCESS on a successful command
|
||||
* -NVL_BAD_ARGS if an invalid device is provided
|
||||
*/
|
||||
NvlStatus nvswitch_lib_load_platform_info
|
||||
(
|
||||
nvswitch_device *lib_handle
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Enable interrupts for this device
|
||||
*
|
||||
* @Description :
|
||||
*
|
||||
* @param[in] device device to enable
|
||||
*
|
||||
* @returns none (the function is declared void; register access
*          errors are not reported back to the caller)
|
||||
*/
|
||||
void
|
||||
nvswitch_lib_enable_interrupts
|
||||
(
|
||||
nvswitch_device *device
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Disable interrupts for this device
|
||||
*
|
||||
* @Description :
|
||||
*
|
||||
* @param[in] device device to enable
|
||||
*
|
||||
* @returns none (the function is declared void; register access
*          errors are not reported back to the caller)
|
||||
*/
|
||||
void
|
||||
nvswitch_lib_disable_interrupts
|
||||
(
|
||||
nvswitch_device *device
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Check if interrupts are pending on this device
|
||||
*
|
||||
* @Description :
|
||||
*
|
||||
* @param[in] device device to check
|
||||
*
|
||||
* @returns NVL_SUCCESS if there were no errors and interrupts were handled
|
||||
* -NVL_BAD_ARGS if bad arguments provided
|
||||
* -NVL_PCI_ERROR if there was a register access error
|
||||
* -NVL_MORE_PROCESSING_REQUIRED no interrupts were found for this device
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_check_interrupts
|
||||
(
|
||||
nvswitch_device *device
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Services interrupts for this device
|
||||
*
|
||||
* @Description :
|
||||
*
|
||||
* @param[in] device device to service
|
||||
*
|
||||
* @returns NVL_SUCCESS if there were no errors and interrupts were handled
|
||||
* -NVL_BAD_ARGS if bad arguments provided
|
||||
* -NVL_PCI_ERROR if there was a register access error
|
||||
* -NVL_MORE_PROCESSING_REQUIRED no interrupts were found for this device
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_service_interrupts
|
||||
(
|
||||
nvswitch_device *device
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Get depth of error logs
|
||||
*
|
||||
* @Description :
|
||||
*
|
||||
* @param[in] device device to check
|
||||
*
|
||||
* @param[out] fatal Count of fatal errors
|
||||
* @param[out] nonfatal Count of non-fatal errors
|
||||
*
|
||||
* @returns NVL_SUCCESS if there were no errors and interrupts were handled
|
||||
* -NVL_NOT_FOUND if bad arguments provided
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_get_log_count
|
||||
(
|
||||
nvswitch_device *device,
|
||||
NvU32 *fatal, NvU32 *nonfatal
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Periodic thread-based dispatcher for kernel functions
|
||||
*
|
||||
* @Description : Its purpose is to do any background subtasks (data collection, thermal
|
||||
* monitoring, etc.). These subtasks may need to run at varying intervals, and
|
||||
* may even wish to adjust their execution period based on other factors.
|
||||
* Each subtask's entry notes the last time it was executed and its desired
|
||||
* execution period. This function returns back to the dispatcher the desired
|
||||
* time interval before it should be called again.
|
||||
*
|
||||
* @param[in] device The device to run background tasks on
|
||||
*
|
||||
* @returns nsec interval to wait before the next call.
|
||||
*/
|
||||
NvU64
|
||||
nvswitch_lib_deferred_task_dispatcher
|
||||
(
|
||||
nvswitch_device *device
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Perform post init tasks
|
||||
*
|
||||
* @Description : Any device initialization/tests which need the device to be
|
||||
* initialized to a sane state go here.
|
||||
*
|
||||
* @param[in] device The device to run the post-init on
|
||||
*
|
||||
* @returns returns NvlStatus code, see nvlink_errors.h
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_post_init_device
|
||||
(
|
||||
nvswitch_device *device
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Perform post init tasks for a blacklisted device
|
||||
*
|
||||
* @Description : Any initialization tasks that should be run after a
|
||||
* blacklisted item should go here.
|
||||
*
|
||||
* @param[in] device The device to run the post-init-blacklist on
|
||||
*
|
||||
* @returns void
|
||||
*/
|
||||
void
|
||||
nvswitch_lib_post_init_blacklist_device
|
||||
(
|
||||
nvswitch_device *device
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Get the UUID of the device
|
||||
*
|
||||
* @Description : Copies out the device's UUID into the uuid field
|
||||
*
|
||||
* @param[in] device The device to get the UUID from
|
||||
*
|
||||
* @param[out] uuid A pointer to a uuid struct in which the UUID is written to
|
||||
*
|
||||
* @returns void
|
||||
*/
|
||||
void
|
||||
nvswitch_lib_get_uuid
|
||||
(
|
||||
nvswitch_device *device,
|
||||
NvUuid *uuid
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Get the Physical ID of the device
|
||||
*
|
||||
* @Description : Copies out the device's Physical ID into the phys_id field
|
||||
*
|
||||
* @param[in] device The device to get the UUID from
|
||||
*
|
||||
* @param[out] phys_id A pointer to a NvU32 which the physical ID is written to
|
||||
*
|
||||
* @returns NVL_SUCCESS if successful
|
||||
* -NVL_BAD_ARGS if bad arguments provided
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_get_physid
|
||||
(
|
||||
nvswitch_device *device,
|
||||
NvU32 *phys_id
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Read the Fabric State for a nvswitch device.
|
||||
*
|
||||
* @Description : Returns the Fabric State for the device
|
||||
*
|
||||
* @param[in] device a reference to the device
|
||||
* @param[in] *ptrs references to the fabric state
|
||||
*
|
||||
* @returns NVL_SUCCESS if the action succeeded
|
||||
* -NVL_BAD_ARGS if bad arguments provided
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_read_fabric_state
|
||||
(
|
||||
nvswitch_device *device,
|
||||
NVSWITCH_DEVICE_FABRIC_STATE *device_fabric_state,
|
||||
NVSWITCH_DEVICE_BLACKLIST_REASON *device_blacklist_reason,
|
||||
NVSWITCH_DRIVER_FABRIC_STATE *driver_fabric_state
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Validates PCI device id
|
||||
*
|
||||
* @Description : Validates PCI device id
|
||||
*
|
||||
* @param[in] device The device id to be validated
|
||||
*
|
||||
* @returns True if device id is valid
|
||||
*/
|
||||
NvBool
|
||||
nvswitch_lib_validate_device_id
|
||||
(
|
||||
NvU32 device_id
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Gets an event if it exists in the Event list
|
||||
*
|
||||
* @Description : Gets an event if it is in the Device's Client
|
||||
* Event list
|
||||
*
|
||||
* @param[in] device Device to operate on
|
||||
* @param[in] osPrivate The private data structure for the OS
|
||||
* @param[out] ppClientEvent Double pointer to client event
|
||||
*
|
||||
* @returns NVL_SUCCESS if client event found
|
||||
* -NVL_BAD_ARGS if bad arguments provided
|
||||
* -NVL_NOT_FOUND if no client event found
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_get_client_event
|
||||
(
|
||||
nvswitch_device *device,
|
||||
void *osPrivate,
|
||||
NVSWITCH_CLIENT_EVENT **ppClientEvent
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Adds a single entry into the Event list
|
||||
*
|
||||
* @Description : Adds an entry into the front of the Device's
|
||||
* Client Event List
|
||||
*
|
||||
* @param[in] device Device to operate on
|
||||
* @param[in] osPrivate The private data structure for OS
|
||||
* @param[in] pParams The parameters for the client event
|
||||
*
|
||||
* @returns NVL_SUCCESS if event added
|
||||
* -NVL_BAD_ARGS if bad arguments provided
|
||||
* -NVL_NO_MEM if allocation fails
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_add_client_event
|
||||
(
|
||||
nvswitch_device *device,
|
||||
void *osPrivate,
|
||||
NvU32 eventId
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Removes entries from the Event list
|
||||
*
|
||||
* @Description : Removes the entries associated with osPrivate
|
||||
* from the Device's Client Event List
|
||||
*
|
||||
* @param[in] device Device to operate on
|
||||
* @param[in] osPrivate The private data structure for OS
|
||||
*
|
||||
* @returns NVL_SUCCESS if event removed
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_remove_client_events
|
||||
(
|
||||
nvswitch_device *device,
|
||||
void *osPrivate
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Notifies all events with a matching event Id in the Client Event list
|
||||
*
|
||||
* @Description : Notifies all events with a matching event Id in the Client Event list
|
||||
*
|
||||
* @param[in] device Device to operate on
|
||||
* @param[in] eventId The event ID to notify
|
||||
*
|
||||
* @returns NVL_SUCCESS if arguments are valid
|
||||
* -NVL_BAD_ARGS if bad arguments provided
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_notify_client_events
|
||||
(
|
||||
nvswitch_device *device,
|
||||
NvU32 eventId
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Gets a mask of valid I2C ports for the device
|
||||
*
|
||||
* @Description : Gets a mask of valid I2C ports for the device
|
||||
*
|
||||
* @param[in] device Device to operate on
|
||||
* @param[out] validPortsMask A pointer to a mask of valid ports
|
||||
*
|
||||
* @returns NVL_SUCCESS if successful
|
||||
* -NVL_BAD_ARGS if bad arguments provided
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_get_valid_ports_mask
|
||||
(
|
||||
nvswitch_device *device,
|
||||
NvU32 *validPortsMask
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Returns a boolean if the I2C interface is supported for the device
|
||||
*
|
||||
* @Description : Returns a boolean if the I2C interface is supported for the device
|
||||
*
|
||||
* @param[in] device Device to operate on
|
||||
*
|
||||
* @returns NV_TRUE device can use the I2C interface
|
||||
* NV_FALSE device cannot use the I2C interface
|
||||
*/
|
||||
NvBool
|
||||
nvswitch_lib_is_i2c_supported
|
||||
(
|
||||
nvswitch_device *device
|
||||
);
|
||||
|
||||
/*
|
||||
* @Brief : Performs an I2C transaction
|
||||
*
|
||||
* @Description : Performs an I2C transaction
|
||||
*
|
||||
* @param[in] device Device to operate on
|
||||
* @param[in] port Port to issue I2C transaction
|
||||
* @param[in] type Type of I2C transaction
|
||||
* @param[in] addr Device address to perform I2C transaction on
|
||||
* @param[in] command I2C command to perform on
|
||||
* @param[in] len Length of the I2C transaction message
|
||||
* @param[in/out] pData A pointer to the buffer containing the input/output data
|
||||
*
|
||||
* @returns NVL_SUCCESS if I2C transaction completes
|
||||
* -NVL_BAD_ARGS if bad arguments provided
|
||||
* -NVL_ERR_INVALID_STATE if something internal went wrong
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_lib_i2c_transfer
|
||||
(
|
||||
nvswitch_device *device,
|
||||
NvU32 port,
|
||||
NvU8 type,
|
||||
NvU8 addr,
|
||||
NvU8 command,
|
||||
NvU32 len,
|
||||
NvU8 *pData
|
||||
);
|
||||
|
||||
/*
|
||||
* Returns count of registered NvSwitch devices.
|
||||
*/
|
||||
NvU32
|
||||
nvswitch_os_get_device_count
|
||||
(
|
||||
void
|
||||
);
|
||||
|
||||
/*
|
||||
* Get current time in nanoseconds
|
||||
* The time is since epoch time (midnight UTC of January 1, 1970)
|
||||
*/
|
||||
NvU64
|
||||
nvswitch_os_get_platform_time
|
||||
(
|
||||
void
|
||||
);
|
||||
|
||||
#if (defined(_WIN32) || defined(_WIN64))
|
||||
#define NVSWITCH_PRINT_ATTRIB(str, arg1)
|
||||
#else
|
||||
#define NVSWITCH_PRINT_ATTRIB(str, arg1) \
|
||||
__attribute__ ((format (printf, (str), (arg1))))
|
||||
#endif // (defined(_WIN32) || defined(_WIN64))
|
||||
|
||||
/*
|
||||
* printf wrapper
|
||||
*/
|
||||
void
|
||||
NVSWITCH_PRINT_ATTRIB(2, 3)
|
||||
nvswitch_os_print
|
||||
(
|
||||
int log_level,
|
||||
const char *pFormat,
|
||||
...
|
||||
);
|
||||
|
||||
/*
|
||||
* "Registry" interface for dword
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_os_read_registry_dword
|
||||
(
|
||||
void *os_handle,
|
||||
const char *name,
|
||||
NvU32 *data
|
||||
);
|
||||
|
||||
/*
|
||||
* "Registry" interface for binary data
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_os_read_registery_binary
|
||||
(
|
||||
void *os_handle,
|
||||
const char *name,
|
||||
NvU8 *data,
|
||||
NvU32 length
|
||||
);
|
||||
|
||||
NvBool
|
||||
nvswitch_os_is_uuid_in_blacklist
|
||||
(
|
||||
NvUuid *uuid
|
||||
);
|
||||
|
||||
|
||||
/*
|
||||
* Override platform/simulation settings for cases
|
||||
*/
|
||||
void
|
||||
nvswitch_os_override_platform
|
||||
(
|
||||
void *os_handle,
|
||||
NvBool *rtlsim
|
||||
);
|
||||
|
||||
/*
|
||||
* Memory management interface
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_os_alloc_contig_memory
|
||||
(
|
||||
void *os_handle,
|
||||
void **virt_addr,
|
||||
NvU32 size,
|
||||
NvBool force_dma32
|
||||
);
|
||||
|
||||
void
|
||||
nvswitch_os_free_contig_memory
|
||||
(
|
||||
void *os_handle,
|
||||
void *virt_addr,
|
||||
NvU32 size
|
||||
);
|
||||
|
||||
NvlStatus
|
||||
nvswitch_os_map_dma_region
|
||||
(
|
||||
void *os_handle,
|
||||
void *cpu_addr,
|
||||
NvU64 *dma_handle,
|
||||
NvU32 size,
|
||||
NvU32 direction
|
||||
);
|
||||
|
||||
NvlStatus
|
||||
nvswitch_os_unmap_dma_region
|
||||
(
|
||||
void *os_handle,
|
||||
void *cpu_addr,
|
||||
NvU64 dma_handle,
|
||||
NvU32 size,
|
||||
NvU32 direction
|
||||
);
|
||||
|
||||
NvlStatus
|
||||
nvswitch_os_set_dma_mask
|
||||
(
|
||||
void *os_handle,
|
||||
NvU32 dma_addr_width
|
||||
);
|
||||
|
||||
NvlStatus
|
||||
nvswitch_os_sync_dma_region_for_cpu
|
||||
(
|
||||
void *os_handle,
|
||||
NvU64 dma_handle,
|
||||
NvU32 size,
|
||||
NvU32 direction
|
||||
);
|
||||
|
||||
NvlStatus
|
||||
nvswitch_os_sync_dma_region_for_device
|
||||
(
|
||||
void *os_handle,
|
||||
NvU64 dma_handle,
|
||||
NvU32 size,
|
||||
NvU32 direction
|
||||
);
|
||||
|
||||
void *
|
||||
nvswitch_os_malloc_trace
|
||||
(
|
||||
NvLength size,
|
||||
const char *file,
|
||||
NvU32 line
|
||||
);
|
||||
|
||||
void
|
||||
nvswitch_os_free
|
||||
(
|
||||
void *pMem
|
||||
);
|
||||
|
||||
NvLength
|
||||
nvswitch_os_strlen
|
||||
(
|
||||
const char *str
|
||||
);
|
||||
|
||||
char*
|
||||
nvswitch_os_strncpy
|
||||
(
|
||||
char *pDest,
|
||||
const char *pSrc,
|
||||
NvLength length
|
||||
);
|
||||
|
||||
int
|
||||
nvswitch_os_strncmp
|
||||
(
|
||||
const char *s1,
|
||||
const char *s2,
|
||||
NvLength length
|
||||
);
|
||||
|
||||
void *
|
||||
nvswitch_os_memset
|
||||
(
|
||||
void *pDest,
|
||||
int value,
|
||||
NvLength size
|
||||
);
|
||||
|
||||
void *
|
||||
nvswitch_os_memcpy
|
||||
(
|
||||
void *pDest,
|
||||
const void *pSrc,
|
||||
NvLength size
|
||||
);
|
||||
|
||||
int
|
||||
nvswitch_os_memcmp
|
||||
(
|
||||
const void *s1,
|
||||
const void *s2,
|
||||
NvLength size
|
||||
);
|
||||
|
||||
/*
|
||||
* Memory read / write interface
|
||||
*/
|
||||
NvU32
|
||||
nvswitch_os_mem_read32
|
||||
(
|
||||
const volatile void * pAddress
|
||||
);
|
||||
|
||||
void
|
||||
nvswitch_os_mem_write32
|
||||
(
|
||||
volatile void *pAddress,
|
||||
NvU32 data
|
||||
);
|
||||
|
||||
NvU64
|
||||
nvswitch_os_mem_read64
|
||||
(
|
||||
const volatile void *pAddress
|
||||
);
|
||||
|
||||
void
|
||||
nvswitch_os_mem_write64
|
||||
(
|
||||
volatile void *pAddress,
|
||||
NvU64 data
|
||||
);
|
||||
|
||||
/*
|
||||
* Interface to write formatted output to sized buffer
|
||||
*/
|
||||
int
|
||||
nvswitch_os_snprintf
|
||||
(
|
||||
char *pString,
|
||||
NvLength size,
|
||||
const char *pFormat,
|
||||
...
|
||||
);
|
||||
|
||||
/*
|
||||
* Interface to write formatted output to sized buffer
|
||||
*/
|
||||
int
|
||||
nvswitch_os_vsnprintf
|
||||
(
|
||||
char *buf,
|
||||
NvLength size,
|
||||
const char *fmt,
|
||||
va_list arglist
|
||||
);
|
||||
|
||||
/*
|
||||
* Debug assert and log interface
|
||||
*/
|
||||
void
|
||||
nvswitch_os_assert_log
|
||||
(
|
||||
int cond,
|
||||
const char *pFormat,
|
||||
...
|
||||
);
|
||||
|
||||
/*
|
||||
* Interface to sleep for specified milliseconds. Yields the CPU to scheduler.
|
||||
*/
|
||||
void
|
||||
nvswitch_os_sleep
|
||||
(
|
||||
unsigned int ms
|
||||
);
|
||||
|
||||
NvlStatus
|
||||
nvswitch_os_acquire_fabric_mgmt_cap
|
||||
(
|
||||
void *osPrivate,
|
||||
NvU64 capDescriptor
|
||||
);
|
||||
|
||||
int
|
||||
nvswitch_os_is_fabric_manager
|
||||
(
|
||||
void *osPrivate
|
||||
);
|
||||
|
||||
int
|
||||
nvswitch_os_is_admin
|
||||
(
|
||||
void
|
||||
);
|
||||
|
||||
NvlStatus
|
||||
nvswitch_os_get_os_version
|
||||
(
|
||||
NvU32 *pMajorVer,
|
||||
NvU32 *pMinorVer,
|
||||
NvU32 *pBuildNum
|
||||
);
|
||||
|
||||
void
|
||||
nvswitch_lib_smbpbi_log_sxid
|
||||
(
|
||||
nvswitch_device *device,
|
||||
NvU32 sxid,
|
||||
const char *pFormat,
|
||||
...
|
||||
);
|
||||
|
||||
/*!
|
||||
* @brief: OS Specific handling to add an event.
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_os_add_client_event
|
||||
(
|
||||
void *osHandle,
|
||||
void *osPrivate,
|
||||
NvU32 eventId
|
||||
);
|
||||
|
||||
/*!
|
||||
* @brief: OS specific handling to remove all events corresponding to osPrivate.
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_os_remove_client_event
|
||||
(
|
||||
void *osHandle,
|
||||
void *osPrivate
|
||||
);
|
||||
|
||||
/*!
|
||||
* @brief: OS specific handling to notify an event.
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_os_notify_client_event
|
||||
(
|
||||
void *osHandle,
|
||||
void *osPrivate,
|
||||
NvU32 eventId
|
||||
);
|
||||
|
||||
/*!
|
||||
* @brief: Gets OS specific support for the REGISTER_EVENTS ioctl
|
||||
*/
|
||||
NvlStatus
|
||||
nvswitch_os_get_supported_register_events_params
|
||||
(
|
||||
NvBool *bSupportsManyEvents,
|
||||
NvBool *bUserSuppliesOsData
|
||||
);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif //_NVSWITCH_EXPORT_H_
|
||||
350
kernel-open/nvidia/i2c_nvswitch.c
Normal file
350
kernel-open/nvidia/i2c_nvswitch.c
Normal file
@@ -0,0 +1,350 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "linux_nvswitch.h"
|
||||
#include <linux/i2c.h>
|
||||
|
||||
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
|
||||
|
||||
#define NVSWITCH_I2C_GET_PARENT(adapter) \
|
||||
(NVSWITCH_DEV *)pci_get_drvdata(to_pci_dev((adapter)->dev.parent));
|
||||
|
||||
#define NVSWITCH_I2C_GET_ALGO_DATA(adapter) \
|
||||
(nvswitch_i2c_algo_data *)(adapter)->algo_data;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
NvU32 port;
|
||||
} nvswitch_i2c_algo_data;
|
||||
|
||||
static int
|
||||
nvswitch_i2c_algo_master_xfer
|
||||
(
|
||||
struct i2c_adapter *adapter,
|
||||
struct i2c_msg msgs[],
|
||||
int num
|
||||
)
|
||||
{
|
||||
int rc;
|
||||
int i;
|
||||
NvU32 port;
|
||||
NvlStatus status = NVL_SUCCESS;
|
||||
nvswitch_i2c_algo_data *i2c_algo_data;
|
||||
NVSWITCH_DEV *nvswitch_dev;
|
||||
const unsigned int supported_i2c_flags = I2C_M_RD
|
||||
#if defined (I2C_M_DMA_SAFE)
|
||||
| I2C_M_DMA_SAFE
|
||||
#endif
|
||||
;
|
||||
|
||||
nvswitch_dev = NVSWITCH_I2C_GET_PARENT(adapter);
|
||||
if (nvswitch_dev == NULL)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
rc = mutex_lock_interruptible(&nvswitch_dev->device_mutex);
|
||||
if (rc)
|
||||
{
|
||||
return rc;
|
||||
}
|
||||
|
||||
if (nvswitch_dev->unusable)
|
||||
{
|
||||
printk(KERN_INFO "%s: a stale fd detected\n", nvswitch_dev->name);
|
||||
status = NVL_ERR_INVALID_STATE;
|
||||
goto nvswitch_i2c_algo_master_xfer_exit;
|
||||
}
|
||||
|
||||
i2c_algo_data = NVSWITCH_I2C_GET_ALGO_DATA(adapter);
|
||||
if (i2c_algo_data == NULL)
|
||||
{
|
||||
status = NVL_ERR_INVALID_STATE;
|
||||
goto nvswitch_i2c_algo_master_xfer_exit;
|
||||
}
|
||||
|
||||
port = i2c_algo_data->port;
|
||||
|
||||
for (i = 0; (i < num) && (status == NVL_SUCCESS); i++)
|
||||
{
|
||||
if (msgs[i].flags & ~supported_i2c_flags)
|
||||
{
|
||||
status = NVL_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
else
|
||||
{
|
||||
status = nvswitch_lib_i2c_transfer(nvswitch_dev->lib_device, port,
|
||||
(msgs[i].flags & I2C_M_RD) ?
|
||||
NVSWITCH_I2C_CMD_READ : NVSWITCH_I2C_CMD_WRITE,
|
||||
(NvU8)(msgs[i].addr & 0x7f), 0,
|
||||
(NvU32)(msgs[i].len & 0xffffUL),
|
||||
(NvU8 *)msgs[i].buf);
|
||||
}
|
||||
}
|
||||
|
||||
nvswitch_i2c_algo_master_xfer_exit:
|
||||
mutex_unlock(&nvswitch_dev->device_mutex);
|
||||
|
||||
rc = nvswitch_map_status(status);
|
||||
return (rc == 0) ? num : rc;
|
||||
}
|
||||
|
||||
static int
|
||||
nvswitch_i2c_algo_smbus_xfer
|
||||
(
|
||||
struct i2c_adapter *adapter,
|
||||
u16 addr,
|
||||
unsigned short flags,
|
||||
char read_write,
|
||||
u8 command,
|
||||
int protocol,
|
||||
union i2c_smbus_data *data
|
||||
)
|
||||
{
|
||||
int rc = -EIO;
|
||||
NvU32 port;
|
||||
NvU8 cmd;
|
||||
NvU32 len;
|
||||
NvU8 type;
|
||||
NvU8 *xfer_data;
|
||||
NvlStatus status = NVL_SUCCESS;
|
||||
nvswitch_i2c_algo_data *i2c_algo_data;
|
||||
NVSWITCH_DEV *nvswitch_dev;
|
||||
|
||||
nvswitch_dev = NVSWITCH_I2C_GET_PARENT(adapter);
|
||||
if (nvswitch_dev == NULL)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
rc = mutex_lock_interruptible(&nvswitch_dev->device_mutex);
|
||||
if (rc)
|
||||
{
|
||||
return rc;
|
||||
}
|
||||
|
||||
if (nvswitch_dev->unusable)
|
||||
{
|
||||
printk(KERN_INFO "%s: a stale fd detected\n", nvswitch_dev->name);
|
||||
status = NVL_ERR_INVALID_STATE;
|
||||
goto nvswitch_i2c_algo_smbus_xfer_exit;
|
||||
}
|
||||
|
||||
i2c_algo_data = NVSWITCH_I2C_GET_ALGO_DATA(adapter);
|
||||
if (i2c_algo_data == NULL)
|
||||
{
|
||||
status = NVL_ERR_INVALID_STATE;
|
||||
goto nvswitch_i2c_algo_smbus_xfer_exit;
|
||||
}
|
||||
|
||||
port = i2c_algo_data->port;
|
||||
|
||||
switch (protocol)
|
||||
{
|
||||
case I2C_SMBUS_QUICK:
|
||||
{
|
||||
cmd = 0;
|
||||
len = 0;
|
||||
type = (read_write == I2C_SMBUS_READ) ?
|
||||
NVSWITCH_I2C_CMD_SMBUS_QUICK_READ :
|
||||
NVSWITCH_I2C_CMD_SMBUS_QUICK_WRITE;
|
||||
xfer_data = NULL;
|
||||
break;
|
||||
}
|
||||
case I2C_SMBUS_BYTE:
|
||||
{
|
||||
cmd = 0;
|
||||
len = 1;
|
||||
|
||||
if (read_write == I2C_SMBUS_READ)
|
||||
{
|
||||
type = NVSWITCH_I2C_CMD_READ;
|
||||
xfer_data = (NvU8 *)&data->byte;
|
||||
}
|
||||
else
|
||||
{
|
||||
type = NVSWITCH_I2C_CMD_WRITE;
|
||||
xfer_data = &command;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case I2C_SMBUS_BYTE_DATA:
|
||||
{
|
||||
cmd = (NvU8)command;
|
||||
len = 1;
|
||||
type = (read_write == I2C_SMBUS_READ) ?
|
||||
NVSWITCH_I2C_CMD_SMBUS_READ :
|
||||
NVSWITCH_I2C_CMD_SMBUS_WRITE;
|
||||
cmd = (NvU8)command;
|
||||
xfer_data = (NvU8 *)&data->byte;
|
||||
break;
|
||||
}
|
||||
case I2C_SMBUS_WORD_DATA:
|
||||
{
|
||||
cmd = (NvU8)command;
|
||||
len = 2;
|
||||
type = (read_write == I2C_SMBUS_READ) ?
|
||||
NVSWITCH_I2C_CMD_SMBUS_READ :
|
||||
NVSWITCH_I2C_CMD_SMBUS_WRITE;
|
||||
xfer_data = (NvU8 *)&data->word;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
status = NVL_BAD_ARGS;
|
||||
goto nvswitch_i2c_algo_smbus_xfer_exit;
|
||||
}
|
||||
}
|
||||
|
||||
status = nvswitch_lib_i2c_transfer(nvswitch_dev->lib_device, port,
|
||||
type, (NvU8)(addr & 0x7f),
|
||||
cmd, len, (NvU8 *)xfer_data);
|
||||
|
||||
nvswitch_i2c_algo_smbus_xfer_exit:
|
||||
mutex_unlock(&nvswitch_dev->device_mutex);
|
||||
|
||||
return nvswitch_map_status(status);
|
||||
}
|
||||
|
||||
static u32 nvswitch_i2c_algo_functionality(struct i2c_adapter *adapter)
|
||||
{
|
||||
return (I2C_FUNC_I2C |
|
||||
I2C_FUNC_SMBUS_QUICK |
|
||||
I2C_FUNC_SMBUS_BYTE |
|
||||
I2C_FUNC_SMBUS_BYTE_DATA |
|
||||
I2C_FUNC_SMBUS_WORD_DATA);
|
||||
}
|
||||
|
||||
/* I2C algorithm hooks registered with the Linux I2C core for NVSwitch ports. */
static struct i2c_algorithm nvswitch_i2c_algo = {
    .master_xfer   = nvswitch_i2c_algo_master_xfer,    /* raw I2C message transfers */
    .smbus_xfer    = nvswitch_i2c_algo_smbus_xfer,     /* SMBus protocol transfers */
    .functionality = nvswitch_i2c_algo_functionality,  /* supported feature mask */
};
|
||||
|
||||
/*
 * Template adapter copied into each per-port i2c_adapter instance by
 * nvswitch_i2c_add_adapter(); algo_data is filled in per port after the copy.
 */
struct i2c_adapter nvswitch_i2c_adapter_prototype = {
    .owner     = THIS_MODULE,
    .algo      = &nvswitch_i2c_algo,
    .algo_data = NULL,
};
|
||||
|
||||
struct i2c_adapter *
|
||||
nvswitch_i2c_add_adapter
|
||||
(
|
||||
NVSWITCH_DEV *nvswitch_dev,
|
||||
NvU32 port
|
||||
)
|
||||
{
|
||||
struct i2c_adapter *adapter = NULL;
|
||||
int rc = 0;
|
||||
struct pci_dev *pci_dev;
|
||||
nvswitch_i2c_algo_data *i2c_algo_data = NULL;
|
||||
|
||||
if (nvswitch_dev == NULL)
|
||||
{
|
||||
printk(KERN_ERR "nvswitch_dev is NULL!\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
adapter = nvswitch_os_malloc(sizeof(struct i2c_adapter));
|
||||
if (adapter == NULL)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
nvswitch_os_memcpy(adapter,
|
||||
&nvswitch_i2c_adapter_prototype,
|
||||
sizeof(struct i2c_adapter));
|
||||
|
||||
i2c_algo_data = nvswitch_os_malloc(sizeof(nvswitch_i2c_algo_data));
|
||||
if (i2c_algo_data == NULL)
|
||||
{
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
i2c_algo_data->port = port;
|
||||
pci_dev = nvswitch_dev->pci_dev;
|
||||
adapter->dev.parent = &pci_dev->dev;
|
||||
adapter->algo_data = (void *)i2c_algo_data;
|
||||
|
||||
rc = nvswitch_os_snprintf(adapter->name,
|
||||
sizeof(adapter->name),
|
||||
"NVIDIA NVSwitch i2c adapter %u at %x:%02x.%u",
|
||||
port,
|
||||
NV_PCI_BUS_NUMBER(pci_dev),
|
||||
NV_PCI_SLOT_NUMBER(pci_dev),
|
||||
PCI_FUNC(pci_dev->devfn));
|
||||
if ((rc < 0) && (rc >= sizeof(adapter->name)))
|
||||
{
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
rc = i2c_add_adapter(adapter);
|
||||
if (rc < 0)
|
||||
{
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
return adapter;
|
||||
|
||||
cleanup:
|
||||
nvswitch_os_free(i2c_algo_data);
|
||||
nvswitch_os_free(adapter);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void
|
||||
nvswitch_i2c_del_adapter
|
||||
(
|
||||
struct i2c_adapter *adapter
|
||||
)
|
||||
{
|
||||
if (adapter != NULL)
|
||||
{
|
||||
nvswitch_os_free(adapter->algo_data);
|
||||
i2c_del_adapter(adapter);
|
||||
nvswitch_os_free(adapter);
|
||||
}
|
||||
}
|
||||
|
||||
#else // (defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE))
|
||||
|
||||
/*
 * Stub used when the kernel lacks I2C support (neither CONFIG_I2C nor
 * CONFIG_I2C_MODULE is set): no adapter can be registered, so always
 * report failure by returning NULL.
 */
struct i2c_adapter *
nvswitch_i2c_add_adapter
(
    NVSWITCH_DEV *nvswitch_dev,
    NvU32 port
)
{
    return NULL;
}
|
||||
|
||||
/*
 * Stub used when the kernel lacks I2C support: nvswitch_i2c_add_adapter()
 * never returns an adapter in this configuration, so there is nothing to
 * tear down.
 */
void
nvswitch_i2c_del_adapter
(
    struct i2c_adapter *adapter
)
{
}
|
||||
|
||||
#endif // (defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE))
|
||||
144
kernel-open/nvidia/ioctl_common_nvswitch.h
Normal file
144
kernel-open/nvidia/ioctl_common_nvswitch.h
Normal file
@@ -0,0 +1,144 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _IOCTL_COMMON_NVSWITCH_H_
|
||||
#define _IOCTL_COMMON_NVSWITCH_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
#define NVSWITCH_DEV_IO_TYPE 'd'
|
||||
#define NVSWITCH_CTL_IO_TYPE 'c'
|
||||
|
||||
/*
|
||||
* Defines for IOCTL Hints
|
||||
*
|
||||
* NVSWITCH_IO_READ_ONLY :
|
||||
* Only reads parameters from the kernel and does not pass any to it
|
||||
*
|
||||
* NVSWITCH_IO_WRITE_ONLY :
|
||||
* Only writes parameters to the kernel, but does not want anything back.
|
||||
*
|
||||
* NVSWITCH_IO_WRITE_READ :
|
||||
* Writes data to the kernel and wants information back
|
||||
*
|
||||
* NVSWITCH_IO_DEFAULT :
|
||||
* Don't copy anything into the kernel, nor copy anything back.
|
||||
*/
|
||||
#define NVSWITCH_IO_READ_ONLY 0x0
|
||||
#define NVSWITCH_IO_WRITE_ONLY 0x1
|
||||
#define NVSWITCH_IO_WRITE_READ 0x2
|
||||
#define NVSWITCH_IO_DEFAULT 0x3
|
||||
|
||||
#if (defined(_WIN32) || defined(_WIN64))
|
||||
/*
|
||||
* Values of less than 0x800 are reserved for Microsoft.
|
||||
* Values of 0x800 and higher can be used by vendors.
|
||||
*/
|
||||
#define IOCTL_START_INDEX 0x800
|
||||
|
||||
/*
|
||||
* Macro for defining new IOCTLs in a platform independent way.
|
||||
*/
|
||||
#define NVSWITCH_IOCTL_CODE(ioType, ctrl, paramType, direction) \
|
||||
CTL_CODE(FILE_DEVICE_UNKNOWN, IOCTL_START_INDEX + ctrl, METHOD_BUFFERED, \
|
||||
(FILE_READ_DATA | FILE_WRITE_DATA))
|
||||
#else
|
||||
|
||||
/*
 * Macro for defining new IOCTLs in a platform independent way.
 *
 * Select Linux specific IOCTL defining macro (_IO, _IOR, _IOW, _IOWR)
 * based on IOCTL direction.
 *
 * Fix: the "direction" macro parameter is now parenthesized at each use,
 * so the expansion stays well-formed for any argument expression
 * (standard macro hygiene; the previous form misparsed compound
 * expressions passed as the direction).
 */
#define NVSWITCH_IOCTL_CODE(ioType, ctrl, paramType, direction)                 \
    (((direction) == NVSWITCH_IO_READ_ONLY) ? _IOR(ioType, ctrl, paramType) :   \
     ((direction) == NVSWITCH_IO_WRITE_ONLY) ? _IOW(ioType, ctrl, paramType) :  \
     ((direction) == NVSWITCH_IO_WRITE_READ) ? _IOWR(ioType, ctrl, paramType) : \
     _IO(ioType, ctrl))
|
||||
|
||||
#endif // (defined(_WIN32) || defined(_WIN64))
|
||||
|
||||
/*
 * NVSWITCH_NVLINK_MAX_LANES is used by both internal and external IOCTLs.
 */
#define NVSWITCH_NVLINK_MAX_LANES 4
|
||||
|
||||
/*
|
||||
* Common Fabric State enums
|
||||
*
|
||||
* Definitions:
|
||||
* Driver Fabric State is intended to reflect the state of the driver and
|
||||
* fabric manager. Once FM sets the Driver State to CONFIGURED, it is
|
||||
* expected the FM will send heartbeat updates. If the heartbeat is not
|
||||
* received before the session timeout, then the driver reports status
|
||||
* as MANAGER_TIMEOUT.
|
||||
*
|
||||
* Device Fabric State reflects the state of the nvswitch device.
|
||||
* FM sets the Device Fabric State to CONFIGURED once FM is managing the
|
||||
* device. If the Device Fabric State is BLACKLISTED then the device is
|
||||
* not available for use; opens fail for a blacklisted device, and interrupts
|
||||
* are disabled.
|
||||
*
|
||||
* Blacklist Reason provides additional detail of why a device is blacklisted.
|
||||
*/
|
||||
/*
 * Driver-side fabric state: tracks driver presence and the Fabric
 * Manager (FM) heartbeat, as described in the block comment above.
 */
typedef enum nvswitch_driver_fabric_state
{
    NVSWITCH_DRIVER_FABRIC_STATE_OFFLINE = 0,      // offline (No driver loaded)
    NVSWITCH_DRIVER_FABRIC_STATE_STANDBY,          // driver up, no FM
    NVSWITCH_DRIVER_FABRIC_STATE_CONFIGURED,       // driver up, FM up
    NVSWITCH_DRIVER_FABRIC_STATE_MANAGER_TIMEOUT,  // driver up, FM heartbeat timed out
    NVSWITCH_DRIVER_FABRIC_STATE_MANAGER_ERROR,    // driver up, FM in error state
    NVSWITCH_DRIVER_FABRIC_STATE_COUNT             // number of states (not a state)
} NVSWITCH_DRIVER_FABRIC_STATE;
|
||||
|
||||
/*
 * Per-device fabric state; BLACKLISTED devices reject opens and have
 * interrupts disabled (see the block comment above).
 */
typedef enum nvswitch_device_fabric_state
{
    NVSWITCH_DEVICE_FABRIC_STATE_OFFLINE = 0,    // offline: No driver, no FM
    NVSWITCH_DEVICE_FABRIC_STATE_STANDBY,        // driver up, no FM, not blacklisted
    NVSWITCH_DEVICE_FABRIC_STATE_CONFIGURED,     // driver up, FM up, not blacklisted
    NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED,    // device is blacklisted
    NVSWITCH_DEVICE_FABRIC_STATE_COUNT           // number of states (not a state)
} NVSWITCH_DEVICE_FABRIC_STATE;
|
||||
|
||||
/*
 * Why a device entered the BLACKLISTED fabric state.  "Manual" reasons
 * come from administrator action; the rest are reported by FM.
 */
typedef enum nvswitch_device_blacklist_mode
{
    NVSWITCH_DEVICE_BLACKLIST_REASON_NONE = 0,                  // device is not blacklisted
    NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_OUT_OF_BAND,        // manually blacklisted by out-of-band client
    NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_IN_BAND,            // manually blacklisted by in-band OS config
    NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_PEER,               // FM indicates blacklisted due to peer manual blacklisted
    NVSWITCH_DEVICE_BLACKLIST_REASON_TRUNK_LINK_FAILURE,        // FM indicates blacklisted due to trunk link failure
    NVSWITCH_DEVICE_BLACKLIST_REASON_TRUNK_LINK_FAILURE_PEER,   // FM indicates blacklisted due to trunk link failure of peer
    NVSWITCH_DEVICE_BLACKLIST_REASON_ACCESS_LINK_FAILURE,       // FM indicates blacklisted due to access link failure
    NVSWITCH_DEVICE_BLACKLIST_REASON_ACCESS_LINK_FAILURE_PEER,  // FM indicates blacklisted due to access link failure of peer
    NVSWITCH_DEVICE_BLACKLIST_REASON_UNSPEC_DEVICE_FAILURE,     // FM indicates blacklisted due to unspecified device failure
    NVSWITCH_DEVICE_BLACKLIST_REASON_UNSPEC_DEVICE_FAILURE_PEER // FM indicates blacklisted due to unspec device failure of peer
} NVSWITCH_DEVICE_BLACKLIST_REASON;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif //_IOCTL_COMMON_NVSWITCH_H_
|
||||
238
kernel-open/nvidia/ioctl_nvswitch.h
Normal file
238
kernel-open/nvidia/ioctl_nvswitch.h
Normal file
@@ -0,0 +1,238 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _IOCTL_NVSWITCH_H_
|
||||
#define _IOCTL_NVSWITCH_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
#include "ioctl_common_nvswitch.h"
|
||||
#include "nvCpuUuid.h"
|
||||
|
||||
/* 4 chars for "SWX-" prefix + 36 chars for UUID string + 1 char for '\0' */
|
||||
#define NVSWITCH_UUID_STRING_LENGTH 41
|
||||
|
||||
#define NVSWITCH_NIBBLE_TO_CHAR(nibble) \
|
||||
(((nibble) > 9) ? (((nibble) - 10) + 'A') : ((nibble) + '0'))
|
||||
|
||||
static NV_INLINE
|
||||
NvU32 nvswitch_uuid_to_string(NvUuid *uuid, char *str, NvU32 strLen)
|
||||
{
|
||||
int i;
|
||||
int j = 0;
|
||||
|
||||
if ((uuid == NULL) || (str == NULL) || (strLen < NVSWITCH_UUID_STRING_LENGTH))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
str[j++] = 'S';
|
||||
str[j++] = 'W';
|
||||
str[j++] = 'X';
|
||||
str[j++] = '-';
|
||||
|
||||
for (i = 0; i < NV_UUID_LEN; i++)
|
||||
{
|
||||
if ((i == 4) || (i == 6) || (i == 8) || (i == 10))
|
||||
{
|
||||
str[j++] = '-';
|
||||
}
|
||||
|
||||
str[j++] = NVSWITCH_NIBBLE_TO_CHAR((uuid->uuid[i] & 0xF0) >> 4);
|
||||
str[j++] = NVSWITCH_NIBBLE_TO_CHAR(uuid->uuid[i] & 0x0F);
|
||||
}
|
||||
|
||||
str[j++] = '\0';
|
||||
|
||||
return j;
|
||||
}
|
||||
|
||||
/*
|
||||
* This file defines IOCTL calls that work with nvidia-nvswitchctl
|
||||
* (device agnostic) node.
|
||||
*/
|
||||
|
||||
#define NVSWITCH_VERSION_STRING_LENGTH  64

/*
 * Version string
 */
typedef struct
{
    char version[NVSWITCH_VERSION_STRING_LENGTH];   /* NUL-terminated version text */
} NVSWITCH_VERSION;

/*
 * NVSWITCH_CTL_CHECK_VERSION
 *
 * The interface will check if the client's version is supported by the driver.
 *
 * Parameters:
 *    user[in]
 *       Version of the interface that the client is compiled with.
 *    kernel[out]
 *       Version of the interface that the kernel driver is compiled with.
 *    is_compatible[out]
 *       Set to true, if user and kernel version are compatible.
 */
typedef struct
{
    NVSWITCH_VERSION user;        /* in:  client interface version */
    NVSWITCH_VERSION kernel;      /* out: driver interface version */
    NvBool is_compatible;         /* out: true if the versions are compatible */
} NVSWITCH_CHECK_VERSION_PARAMS;
|
||||
|
||||
/*
|
||||
* Max devices supported by the driver
|
||||
*
|
||||
* See ctrl_dev_nvswitch.h for preprocessor definition modification guidelines.
|
||||
*/
|
||||
#define NVSWITCH_MAX_DEVICES 64

/*
 * NVSWITCH_CTL_GET_DEVICES
 *
 * This control call will be removed soon. Use NVSWITCH_CTL_GET_DEVICES_V2 instead.
 *
 * Provides information about registered NvSwitch devices.
 *
 * Parameters:
 *   deviceInstance[out]
 *      Device instance of the device. This is same as the device minor number
 *      for Linux platforms.
 */
typedef struct
{
    NvU32 deviceInstance;   /* out: device minor number on Linux */
    NvU32 pciDomain;        /* out: PCI domain of the switch */
    NvU32 pciBus;           /* out: PCI bus number */
    NvU32 pciDevice;        /* out: PCI device (slot) number */
    NvU32 pciFunction;      /* out: PCI function number */
    /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */
} NVSWITCH_DEVICE_INSTANCE_INFO;

typedef struct
{
    NvU32 deviceCount;      /* out: number of valid entries in info[] */
    NVSWITCH_DEVICE_INSTANCE_INFO info[NVSWITCH_MAX_DEVICES];
    /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */
} NVSWITCH_GET_DEVICES_PARAMS;
|
||||
|
||||
/*
|
||||
* NVSWITCH_CTL_GET_DEVICES_V2
|
||||
*
|
||||
* Provides information about registered NvSwitch devices.
|
||||
* V2 adds a UUID field to the device instance info struct
|
||||
*
|
||||
* Parameters:
|
||||
* deviceInstance[out]
|
||||
* Device instance of the device. This is same as the device minor number
|
||||
* for Linux platforms.
|
||||
*/
|
||||
typedef struct
{
    NvU32 deviceInstance;                           /* out: device minor number on Linux */
    NvUuid uuid;                                    /* out: device UUID (new in V2) */
    NvU32 pciDomain;                                /* out: PCI domain */
    NvU32 pciBus;                                   /* out: PCI bus number */
    NvU32 pciDevice;                                /* out: PCI device (slot) number */
    NvU32 pciFunction;                              /* out: PCI function number */
    NVSWITCH_DRIVER_FABRIC_STATE driverState;       /* out: driver/FM fabric state */
    NVSWITCH_DEVICE_FABRIC_STATE deviceState;       /* out: device fabric state */
    NVSWITCH_DEVICE_BLACKLIST_REASON deviceReason;  /* out: blacklist reason, if any */
    NvU32 physId;                                   /* out: physical id; presumably
                                                     * NVSWITCH_INVALID_PHYS_ID when
                                                     * unassigned -- TODO confirm */

    /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */
} NVSWITCH_DEVICE_INSTANCE_INFO_V2;

#define NVSWITCH_INVALID_PHYS_ID        NV_U32_MAX

typedef struct
{
    NvU32 deviceCount;      /* out: number of valid entries in info[] */
    NVSWITCH_DEVICE_INSTANCE_INFO_V2 info[NVSWITCH_MAX_DEVICES];
    /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */
} NVSWITCH_GET_DEVICES_V2_PARAMS;
|
||||
|
||||
#define NVSWITCH_DEVICE_NAME_STRING_LENGTH 10

/*
 * CTRL_NVSWITCH_GET_DEVICE_NODES
 *
 * Provides a mapping of the VMWare kernel device names (vmfgx[N]) and registered
 * NVSwitch devices (nvidia-nvswitch[N]).
 *
 * This IOCTL is only implemented for VMWare.
 *
 * Parameters:
 *   deviceInstance[out]
 *      Device instance of the device. This is same as the device minor number
 *      for VMWare platforms.
 *   dev_name[out]
 *      VMWare kernel device name of the nvswitch device (vmfgx[N])
 */
typedef struct
{
    NvU32 deviceInstance;   /* out: device minor number on VMWare */
    NvUuid uuid;            /* out: device UUID */
    NvU8 dev_name[NVSWITCH_DEVICE_NAME_STRING_LENGTH];  /* out: VMWare kernel name (vmfgx[N]) */
    /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */
} NVSWITCH_DEVICE_NODE_INFO;

typedef struct
{
    NvU32 deviceCount;      /* out: number of valid entries in info[] */
    NVSWITCH_DEVICE_NODE_INFO info[NVSWITCH_MAX_DEVICES];
    /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */
} NVSWITCH_GET_DEVICE_NODES_PARAMS;
|
||||
|
||||
/* Command numbers for the device-agnostic (nvswitchctl) IOCTLs below. */
#define CTRL_NVSWITCH_GET_DEVICES       0x01
#define CTRL_NVSWITCH_CHECK_VERSION     0x02
#define CTRL_NVSWITCH_GET_DEVICES_V2    0x03
#define CTRL_NVSWITCH_GET_DEVICE_NODES  0x04

/*
 * Nvswitchctl (device agnostic) IOCTLs
 */

#define IOCTL_NVSWITCH_GET_DEVICES \
    NVSWITCH_IOCTL_CODE(NVSWITCH_CTL_IO_TYPE, CTRL_NVSWITCH_GET_DEVICES, NVSWITCH_GET_DEVICES_PARAMS, \
                        NVSWITCH_IO_READ_ONLY)
#define IOCTL_NVSWITCH_CHECK_VERSION \
    NVSWITCH_IOCTL_CODE(NVSWITCH_CTL_IO_TYPE, CTRL_NVSWITCH_CHECK_VERSION, NVSWITCH_CHECK_VERSION_PARAMS, \
                        NVSWITCH_IO_WRITE_READ)
#define IOCTL_NVSWITCH_GET_DEVICES_V2 \
    NVSWITCH_IOCTL_CODE(NVSWITCH_CTL_IO_TYPE, CTRL_NVSWITCH_GET_DEVICES_V2, NVSWITCH_GET_DEVICES_V2_PARAMS, \
                        NVSWITCH_IO_READ_ONLY)
#define IOCTL_NVSWITCH_GET_DEVICE_NODES \
    NVSWITCH_IOCTL_CODE(NVSWITCH_CTL_IO_TYPE, CTRL_NVSWITCH_GET_DEVICE_NODES, NVSWITCH_GET_DEVICE_NODES_PARAMS, \
                        NVSWITCH_IO_READ_ONLY)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif //_IOCTL_NVSWITCH_H_
|
||||
2673
kernel-open/nvidia/linux_nvswitch.c
Normal file
2673
kernel-open/nvidia/linux_nvswitch.c
Normal file
File diff suppressed because it is too large
Load Diff
90
kernel-open/nvidia/linux_nvswitch.h
Normal file
90
kernel-open/nvidia/linux_nvswitch.h
Normal file
@@ -0,0 +1,90 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef LINUX_NVSWITCH_H
|
||||
#define LINUX_NVSWITCH_H
|
||||
|
||||
#include "nvmisc.h"
|
||||
#include "nv-linux.h"
|
||||
#include "nv-kthread-q.h"
|
||||
#include "export_nvswitch.h"
|
||||
|
||||
#define NVSWITCH_SHORT_NAME "nvswi"
|
||||
|
||||
#define NVSWITCH_IRQ_NONE 0
|
||||
#define NVSWITCH_IRQ_MSIX 1
|
||||
#define NVSWITCH_IRQ_MSI 2
|
||||
#define NVSWITCH_IRQ_PIN 3
|
||||
|
||||
#define NVSWITCH_OS_ASSERT(_cond) \
|
||||
nvswitch_os_assert_log((_cond), "NVSwitch: Assertion failed in %s() at %s:%d\n", \
|
||||
__FUNCTION__ , __FILE__, __LINE__)
|
||||
|
||||
#define NVSWITCH_KMALLOC_LIMIT (128 * 1024)
|
||||
|
||||
#define nvswitch_os_malloc(_size) nvswitch_os_malloc_trace(_size, __FILE__, __LINE__)
|
||||
|
||||
typedef struct
|
||||
{
|
||||
struct list_head entry;
|
||||
struct i2c_adapter *adapter;
|
||||
} nvswitch_i2c_adapter_entry;
|
||||
|
||||
// Per-chip driver state
|
||||
typedef struct
|
||||
{
|
||||
char name[sizeof(NVSWITCH_DRIVER_NAME) + 4];
|
||||
char sname[sizeof(NVSWITCH_SHORT_NAME) + 4]; /* short name */
|
||||
int minor;
|
||||
NvUuid uuid;
|
||||
struct mutex device_mutex;
|
||||
nvswitch_device *lib_device; /* nvswitch library device */
|
||||
wait_queue_head_t wait_q_errors;
|
||||
void *bar0;
|
||||
struct nv_kthread_q task_q; /* Background task queue */
|
||||
struct nv_kthread_q_item task_item; /* Background dispatch task */
|
||||
atomic_t task_q_ready;
|
||||
wait_queue_head_t wait_q_shutdown;
|
||||
struct pci_dev *pci_dev;
|
||||
atomic_t ref_count;
|
||||
struct list_head list_node;
|
||||
NvBool unusable;
|
||||
NvU32 phys_id;
|
||||
NvU64 bios_ver;
|
||||
#if defined(CONFIG_PROC_FS)
|
||||
struct proc_dir_entry *procfs_dir;
|
||||
#endif
|
||||
NvU8 irq_mechanism;
|
||||
struct list_head i2c_adapter_list;
|
||||
} NVSWITCH_DEV;
|
||||
|
||||
|
||||
int nvswitch_map_status(NvlStatus status);
|
||||
int nvswitch_procfs_init(void);
|
||||
void nvswitch_procfs_exit(void);
|
||||
int nvswitch_procfs_device_add(NVSWITCH_DEV *nvswitch_dev);
|
||||
void nvswitch_procfs_device_remove(NVSWITCH_DEV *nvswitch_dev);
|
||||
struct i2c_adapter *nvswitch_i2c_add_adapter(NVSWITCH_DEV *nvswitch_dev, NvU32 port);
|
||||
void nvswitch_i2c_del_adapter(struct i2c_adapter *adapter);
|
||||
|
||||
#endif // LINUX_NVSWITCH_H
|
||||
1880
kernel-open/nvidia/nv-acpi.c
Normal file
1880
kernel-open/nvidia/nv-acpi.c
Normal file
File diff suppressed because it is too large
Load Diff
821
kernel-open/nvidia/nv-caps.c
Normal file
821
kernel-open/nvidia/nv-caps.c
Normal file
@@ -0,0 +1,821 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nv-linux.h"
|
||||
#include "nv-caps.h"
|
||||
#include "nv-procfs.h"
|
||||
#include "nv-hash.h"
|
||||
|
||||
extern int NVreg_ModifyDeviceFiles;
|
||||
|
||||
/* sys_close() or __close_fd() */
|
||||
#include <linux/syscalls.h>
|
||||
|
||||
#define NV_CAP_DRV_MINOR_COUNT 8192
|
||||
|
||||
/* Hash table with 512 buckets */
|
||||
#define NV_CAP_HASH_BITS 9
|
||||
NV_DECLARE_HASHTABLE(g_nv_cap_hash_table, NV_CAP_HASH_BITS);
|
||||
|
||||
#define NV_CAP_HASH_SIZE NV_HASH_SIZE(g_nv_cap_hash_table)
|
||||
|
||||
#define nv_cap_hash_key(path) (nv_string_hash(path) % NV_CAP_HASH_SIZE)
|
||||
|
||||
typedef struct nv_cap_table_entry
|
||||
{
|
||||
/* name must be the first element */
|
||||
const char *name;
|
||||
int minor;
|
||||
struct hlist_node hlist;
|
||||
} nv_cap_table_entry_t;
|
||||
|
||||
#define NV_CAP_NUM_ENTRIES(_table) (sizeof(_table) / sizeof(_table[0]))
|
||||
|
||||
/*
 * Static capability tables: only the path (first member) is initialized
 * here; minor and hlist start zeroed -- presumably assigned when the
 * table is registered into the hash table, TODO confirm.
 */

/* NVLink fabric-management capability. */
static nv_cap_table_entry_t g_nv_cap_nvlink_table[] =
{
    {"/driver/nvidia-nvlink/capabilities/fabric-mgmt"}
};

/* Top-level MIG capabilities (not tied to a single GPU). */
static nv_cap_table_entry_t g_nv_cap_mig_table[] =
{
    {"/driver/nvidia/capabilities/mig/config"},
    {"/driver/nvidia/capabilities/mig/monitor"}
};
|
||||
|
||||
/*
 * Expands to the access-capability table entries for the 8 compute
 * instances (ci0..ci7) under one GPU-instance path.
 */
#define NV_CAP_MIG_CI_ENTRIES(_gi) \
    {_gi "/ci0/access"}, \
    {_gi "/ci1/access"}, \
    {_gi "/ci2/access"}, \
    {_gi "/ci3/access"}, \
    {_gi "/ci4/access"}, \
    {_gi "/ci5/access"}, \
    {_gi "/ci6/access"}, \
    {_gi "/ci7/access"}

/*
 * Expands to the table entries for the 15 GPU instances (gi0..gi14) of
 * one GPU: each GI gets its own access entry plus the 8 CI entries above.
 */
#define NV_CAP_MIG_GI_ENTRIES(_gpu) \
    {_gpu "/gi0/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi0"), \
    {_gpu "/gi1/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi1"), \
    {_gpu "/gi2/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi2"), \
    {_gpu "/gi3/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi3"), \
    {_gpu "/gi4/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi4"), \
    {_gpu "/gi5/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi5"), \
    {_gpu "/gi6/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi6"), \
    {_gpu "/gi7/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi7"), \
    {_gpu "/gi8/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi8"), \
    {_gpu "/gi9/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi9"), \
    {_gpu "/gi10/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi10"), \
    {_gpu "/gi11/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi11"), \
    {_gpu "/gi12/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi12"), \
    {_gpu "/gi13/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi13"), \
    {_gpu "/gi14/access"}, \
    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi14")
|
||||
|
||||
/*
 * Per-GPU MIG capability table covering up to 32 GPUs (gpu0..gpu31);
 * each GPU expands to 15 GIs x (1 access + 8 CI access) entries via
 * NV_CAP_MIG_GI_ENTRIES.
 */
static nv_cap_table_entry_t g_nv_cap_mig_gpu_table[] =
{
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu0/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu1/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu2/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu3/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu4/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu5/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu6/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu7/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu8/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu9/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu10/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu11/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu12/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu13/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu14/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu15/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu16/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu17/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu18/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu19/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu20/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu21/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu22/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu23/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu24/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu25/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu26/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu27/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu28/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu29/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu30/mig"),
    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu31/mig")
};
|
||||
|
||||
/* Runtime state for one capability node exposed through procfs. */
struct nv_cap
{
    char *path;                     /* full capability path */
    char *name;                     /* capability name -- relation to path
                                     * not visible here, TODO confirm */
    int minor;                      /* device minor assigned to this cap */
    int permissions;                /* access permissions for the entry */
    int modify;                     /* modify flag (semantics defined by users
                                     * of this struct -- TODO confirm) */
    struct proc_dir_entry *parent;  /* parent procfs directory */
    struct proc_dir_entry *entry;   /* procfs entry for this capability */
};

/* Upper bound on a single write to a capability procfs file. */
#define NV_CAP_PROCFS_WRITE_BUF_SIZE 128

/* Per-open-file state for a capability procfs entry. */
typedef struct nv_cap_file_private
{
    int minor;                                  /* minor of the backing capability */
    int permissions;                            /* permissions snapshot */
    int modify;                                 /* modify flag snapshot */
    char buffer[NV_CAP_PROCFS_WRITE_BUF_SIZE];  /* staging buffer for writes */
    off_t offset;                               /* current offset into buffer */
} nv_cap_file_private_t;
|
||||
|
||||
/*
 * Global char-device state for the capability driver.
 * NOTE(review): file-scope definition without 'static' -- looks intended
 * to be private to this file; confirm no other TU references it before
 * adding 'static'.
 */
struct
{
    NvBool initialized;     /* set once the driver is initialized -- TODO confirm where */
    struct cdev cdev;       /* character device backing capability nodes */
    dev_t devno;            /* base device number for the cdev region */
} g_nv_cap_drv;
|
||||
|
||||
#define NV_CAP_PROCFS_DIR "driver/nvidia-caps"
|
||||
#define NV_CAP_NAME_BUF_SIZE 128
|
||||
|
||||
static struct proc_dir_entry *nv_cap_procfs_dir;
|
||||
static struct proc_dir_entry *nv_cap_procfs_nvlink_minors;
|
||||
static struct proc_dir_entry *nv_cap_procfs_mig_minors;
|
||||
|
||||
static int nv_procfs_read_nvlink_minors(struct seq_file *s, void *v)
|
||||
{
|
||||
int i, count;
|
||||
char name[NV_CAP_NAME_BUF_SIZE];
|
||||
|
||||
count = NV_CAP_NUM_ENTRIES(g_nv_cap_nvlink_table);
|
||||
for (i = 0; i < count; i++)
|
||||
{
|
||||
if (sscanf(g_nv_cap_nvlink_table[i].name,
|
||||
"/driver/nvidia-nvlink/capabilities/%s", name) == 1)
|
||||
{
|
||||
name[sizeof(name) - 1] = '\0';
|
||||
seq_printf(s, "%s %d\n", name, g_nv_cap_nvlink_table[i].minor);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nv_procfs_read_mig_minors(struct seq_file *s, void *v)
|
||||
{
|
||||
int i, count, gpu;
|
||||
char name[NV_CAP_NAME_BUF_SIZE];
|
||||
|
||||
count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_table);
|
||||
for (i = 0; i < count; i++)
|
||||
{
|
||||
if (sscanf(g_nv_cap_mig_table[i].name,
|
||||
"/driver/nvidia/capabilities/mig/%s", name) == 1)
|
||||
{
|
||||
name[sizeof(name) - 1] = '\0';
|
||||
seq_printf(s, "%s %d\n", name, g_nv_cap_mig_table[i].minor);
|
||||
}
|
||||
}
|
||||
|
||||
count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_gpu_table);
|
||||
for (i = 0; i < count; i++)
|
||||
{
|
||||
if (sscanf(g_nv_cap_mig_gpu_table[i].name,
|
||||
"/driver/nvidia/capabilities/gpu%d/mig/%s", &gpu, name) == 2)
|
||||
{
|
||||
name[sizeof(name) - 1] = '\0';
|
||||
seq_printf(s, "gpu%d/%s %d\n",
|
||||
gpu, name, g_nv_cap_mig_gpu_table[i].minor);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(nvlink_minors, nv_system_pm_lock);
|
||||
|
||||
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(mig_minors, nv_system_pm_lock);
|
||||
|
||||
static void nv_cap_procfs_exit(void)
|
||||
{
|
||||
if (!nv_cap_procfs_dir)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
nv_procfs_unregister_all(nv_cap_procfs_dir, nv_cap_procfs_dir);
|
||||
nv_cap_procfs_dir = NULL;
|
||||
}
|
||||
|
||||
/*
 * Create the /proc/driver/nvidia-caps directory and its two read-only
 * listing files ("mig-minors" and "nvlink-minors").
 *
 * Returns 0 on success, -EACCES on any procfs registration failure
 * (partial registrations are unwound via nv_cap_procfs_exit()).
 * NV_CREATE_PROC_DIR/NV_CREATE_PROC_FILE are project macros —
 * presumably wrappers over proc_mkdir/proc_create; confirm in nv-procfs.h.
 */
int nv_cap_procfs_init(void)
{
    nv_cap_procfs_dir = NV_CREATE_PROC_DIR(NV_CAP_PROCFS_DIR, NULL);
    if (nv_cap_procfs_dir == NULL)
    {
        return -EACCES;
    }

    nv_cap_procfs_mig_minors = NV_CREATE_PROC_FILE("mig-minors",
                                                   nv_cap_procfs_dir,
                                                   mig_minors,
                                                   NULL);
    if (nv_cap_procfs_mig_minors == NULL)
    {
        goto cleanup;
    }

    nv_cap_procfs_nvlink_minors = NV_CREATE_PROC_FILE("nvlink-minors",
                                                      nv_cap_procfs_dir,
                                                      nvlink_minors,
                                                      NULL);
    if (nv_cap_procfs_nvlink_minors == NULL)
    {
        goto cleanup;
    }

    return 0;

cleanup:
    /* Removes the whole directory tree, including any file created above. */
    nv_cap_procfs_exit();

    return -EACCES;
}
|
||||
|
||||
/*
 * Look up the device minor assigned to a capability path.
 *
 * Walks the hash bucket selected by nv_cap_hash_key(path) and compares
 * full path strings to resolve collisions.  Returns the minor, or -1 if
 * the path is not a known capability.
 */
static int nv_cap_find_minor(char *path)
{
    unsigned int key = nv_cap_hash_key(path);
    nv_cap_table_entry_t *entry;

    nv_hash_for_each_possible(g_nv_cap_hash_table, entry, hlist, key)
    {
        if (strcmp(path, entry->name) == 0)
        {
            return entry->minor;
        }
    }

    return -1;
}
|
||||
|
||||
/*
 * Assign device minors to every entry of a capability table and insert
 * the entries into the global lookup hash.
 *
 * 'minor' is function-static so minors remain unique across the several
 * tables initialized one after another by nv_cap_tables_init().  As a
 * consequence this must only run once per table (it is not idempotent).
 */
static void _nv_cap_table_init(nv_cap_table_entry_t *table, int count)
{
    int i;
    unsigned int key;
    static int minor = 0;  /* monotonically increasing across all tables */

    for (i = 0; i < count; i++)
    {
        table[i].minor = minor++;
        INIT_HLIST_NODE(&table[i].hlist);
        key = nv_cap_hash_key(table[i].name);
        nv_hash_add(g_nv_cap_hash_table, &table[i].hlist, key);
    }

    /* The chrdev region only reserves NV_CAP_DRV_MINOR_COUNT minors. */
    WARN_ON(minor > NV_CAP_DRV_MINOR_COUNT);
}
|
||||
|
||||
#define nv_cap_table_init(table) \
|
||||
_nv_cap_table_init(table, NV_CAP_NUM_ENTRIES(table))
|
||||
|
||||
/*
 * One-time initialization of all capability tables and the shared hash.
 * The BUILD_BUG_ON pins 'name' as the first member of
 * nv_cap_table_entry_t — presumably relied upon elsewhere for casting;
 * confirm against the table macro definitions.
 */
static void nv_cap_tables_init(void)
{
    BUILD_BUG_ON(offsetof(nv_cap_table_entry_t, name) != 0);

    nv_hash_init(g_nv_cap_hash_table);

    nv_cap_table_init(g_nv_cap_nvlink_table);
    nv_cap_table_init(g_nv_cap_mig_table);
    nv_cap_table_init(g_nv_cap_mig_gpu_table);
}
|
||||
|
||||
/*
 * Write handler for a capability proc file.
 *
 * Appends user data into the per-open accumulation buffer; the full
 * buffer is parsed once, at release time (see nv_cap_procfs_release()).
 * The buffer always keeps one byte free for the NUL terminator, so a
 * write that would not fit entirely is refused with -ENOSPC rather
 * than truncated.
 */
static ssize_t nv_cap_procfs_write(struct file *file,
                                   const char __user *buffer,
                                   size_t count, loff_t *pos)
{
    nv_cap_file_private_t *private = NULL;
    unsigned long bytes_left;
    char *proc_buffer;

    /* single_open() stored our private state in the seq_file. */
    private = ((struct seq_file *)file->private_data)->private;
    /* Space remaining, reserving one byte for the trailing NUL. */
    bytes_left = (sizeof(private->buffer) - private->offset - 1);

    if (count == 0)
    {
        return -EINVAL;
    }

    if ((bytes_left == 0) || (count > bytes_left))
    {
        return -ENOSPC;
    }

    proc_buffer = &private->buffer[private->offset];

    if (copy_from_user(proc_buffer, buffer, count))
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps: failed to copy in proc data!\n");
        return -EFAULT;
    }

    private->offset += count;
    /* Keep the accumulated data NUL-terminated for sscanf at release. */
    proc_buffer[count] = '\0';

    *pos = private->offset;

    return count;
}
|
||||
|
||||
/*
 * seq_file show callback for a capability proc file: reports the device
 * file's minor, mode, and modify flag as "Key: value" lines, which
 * userspace (nvidia-modprobe et al.) parses to create the device node.
 */
static int nv_cap_procfs_read(struct seq_file *s, void *v)
{
    nv_cap_file_private_t *private = s->private;

    seq_printf(s, "%s: %d\n", "DeviceFileMinor", private->minor);
    seq_printf(s, "%s: %d\n", "DeviceFileMode", private->permissions);
    seq_printf(s, "%s: %d\n", "DeviceFileModify", private->modify);

    return 0;
}
|
||||
|
||||
/*
 * Open handler for a capability proc file.
 *
 * Snapshots the nv_cap_t's state into a per-open private struct, wires
 * it into the seq_file via single_open(), and takes the system PM lock
 * for the duration of the open (released in nv_cap_procfs_release()).
 *
 * Error unwinding: if the PM lock cannot be taken, single_release()
 * undoes single_open() and the private struct is freed here, since
 * release will not run for a failed open.
 */
static int nv_cap_procfs_open(struct inode *inode, struct file *file)
{
    nv_cap_file_private_t *private = NULL;
    int rc;
    nv_cap_t *cap = NV_PDE_DATA(inode);

    NV_KMALLOC(private, sizeof(nv_cap_file_private_t));
    if (private == NULL)
    {
        return -ENOMEM;
    }

    /* Copy out the capability state this open will report. */
    private->minor = cap->minor;
    private->permissions = cap->permissions;
    private->offset = 0;
    private->modify = cap->modify;

    rc = single_open(file, nv_cap_procfs_read, private);
    if (rc < 0)
    {
        NV_KFREE(private, sizeof(nv_cap_file_private_t));
        return rc;
    }

    rc = nv_down_read_interruptible(&nv_system_pm_lock);
    if (rc < 0)
    {
        single_release(inode, file);
        NV_KFREE(private, sizeof(nv_cap_file_private_t));
    }

    return rc;
}
|
||||
|
||||
/*
 * Release handler for a capability proc file.
 *
 * Drops the PM lock taken at open, tears down the seq_file, and — if
 * the user wrote anything — parses a "DeviceFileModify: <n>" directive
 * out of the accumulated write buffer and applies it to the capability.
 */
static int nv_cap_procfs_release(struct inode *inode, struct file *file)
{
    struct seq_file *s = file->private_data;
    nv_cap_file_private_t *private = NULL;
    char *buffer;
    int modify;
    nv_cap_t *cap = NV_PDE_DATA(inode);

    if (s != NULL)
    {
        private = s->private;
    }

    up_read(&nv_system_pm_lock);

    single_release(inode, file);

    if (private != NULL)
    {
        buffer = private->buffer;

        /* offset != 0 means at least one successful write happened. */
        if (private->offset != 0)
        {
            if (sscanf(buffer, "DeviceFileModify: %d", &modify) == 1)
            {
                cap->modify = modify;
            }
        }

        NV_KFREE(private, sizeof(nv_cap_file_private_t));
    }

    /*
     * All open files using the proc entry will be invalidated
     * if the entry is removed.
     */
    file->private_data = NULL;

    return 0;
}
|
||||
|
||||
/*
 * File operations for capability proc entries.  Reads go through the
 * seq_file machinery (single_open in nv_cap_procfs_open); writes are
 * buffered and applied at release.  The NV_PROC_OPS_* names abstract
 * over the proc_ops/file_operations split across kernel versions.
 */
static nv_proc_ops_t g_nv_cap_procfs_fops = {
    NV_PROC_OPS_SET_OWNER()
    .NV_PROC_OPS_OPEN = nv_cap_procfs_open,
    .NV_PROC_OPS_RELEASE = nv_cap_procfs_release,
    .NV_PROC_OPS_WRITE = nv_cap_procfs_write,
    .NV_PROC_OPS_READ = seq_read,
    .NV_PROC_OPS_LSEEK = seq_lseek,
};
|
||||
|
||||
/* forward declaration of g_nv_cap_drv_fops */
|
||||
static struct file_operations g_nv_cap_drv_fops;
|
||||
|
||||
/*
 * Validate that 'fd' is an open descriptor on the nv-caps character
 * device carrying exactly the minor of 'cap', and if so duplicate it
 * into a new close-on-exec descriptor in the current process.
 *
 * Returns the new fd on success, -1 on any failure.  On success the
 * file reference taken by fget() is consumed by fd_install(); on
 * failure it is dropped via fput().
 */
int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd)
{
    struct file *file;
    int dup_fd;
    struct inode *inode = NULL;
    dev_t rdev = 0;
    struct files_struct *files = current->files;
    struct fdtable *fdt;

    if (cap == NULL)
    {
        return -1;
    }

    file = fget(fd);
    if (file == NULL)
    {
        return -1;
    }

    inode = NV_FILE_INODE(file);
    if (inode == NULL)
    {
        goto err;
    }

    /* Make sure the fd belongs to the nv-cap-drv */
    if (file->f_op != &g_nv_cap_drv_fops)
    {
        goto err;
    }

    /* Make sure the fd has the expected capability */
    rdev = inode->i_rdev;
    if (MINOR(rdev) != cap->minor)
    {
        goto err;
    }

    /* Prefer atomically allocating the fd with O_CLOEXEC already set. */
    dup_fd = NV_GET_UNUSED_FD_FLAGS(O_CLOEXEC);
    if (dup_fd < 0)
    {
        /* Fallback for kernels without the flags variant. */
        dup_fd = NV_GET_UNUSED_FD();
        if (dup_fd < 0)
        {
            goto err;
        }

        /*
         * Set CLOEXEC before installing the FD.
         *
         * If fork() happens in between, the opened unused FD will have
         * a NULL struct file associated with it, which is okay.
         *
         * The only well known bug here is the race with dup(2), which is
         * already documented in the kernel, see fd_install()'s description.
         */

        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        NV_SET_CLOSE_ON_EXEC(dup_fd, fdt);
        spin_unlock(&files->file_lock);
    }

    /* fd_install() takes ownership of the fget() reference. */
    fd_install(dup_fd, file);
    return dup_fd;

err:
    fput(file);
    return -1;
}
|
||||
|
||||
/*
 * Close a capability fd previously returned by
 * nv_cap_validate_and_dup_fd().  fd == -1 (the error sentinel) is a
 * no-op.  Uses whichever close entry point the running kernel exports.
 */
void NV_API_CALL nv_cap_close_fd(int fd)
{
    if (fd == -1)
    {
        return;
    }

    /*
     * Acquire task_lock as we access current->files explicitly (__close_fd)
     * and implicitly (sys_close), and it will race with the exit path.
     */
    task_lock(current);

    /* Nothing to do, we are in exit path */
    if (current->files == NULL)
    {
        task_unlock(current);
        return;
    }

    /*
     * From v4.17-rc1 (to v5.10.8) kernels have stopped exporting sys_close(fd)
     * and started exporting __close_fd, as of this commit:
     * 2018-04-02 2ca2a09d6215 ("fs: add ksys_close() wrapper; remove in-kernel
     * calls to sys_close()")
     * Kernels v5.11-rc1 onwards have stopped exporting __close_fd, and started
     * exporting close_fd, as of this commit:
     * 2020-12-20 8760c909f54a ("file: Rename __close_fd to close_fd and remove
     * the files parameter")
     */
#if NV_IS_EXPORT_SYMBOL_PRESENT_close_fd
    close_fd(fd);
#elif NV_IS_EXPORT_SYMBOL_PRESENT___close_fd
    __close_fd(current->files, fd);
#else
    sys_close(fd);
#endif

    task_unlock(current);
}
|
||||
|
||||
/*
 * Allocate an nv_cap_t whose path is "<parent-path>/<name>" and whose
 * name is a copy of 'name'.  The minor defaults to -1 (unassigned) and
 * the modify flag inherits the NVreg_ModifyDeviceFiles module option.
 * Returns NULL on bad arguments or allocation failure; partial
 * allocations are unwound.  Pair with nv_cap_free().
 */
static nv_cap_t* nv_cap_alloc(nv_cap_t *parent_cap, const char *name)
{
    nv_cap_t *cap;
    int len;

    if ((parent_cap == NULL) || (name == NULL))
    {
        return NULL;
    }

    NV_KMALLOC(cap, sizeof(nv_cap_t));
    if (cap == NULL)
    {
        return NULL;
    }

    /* "<parent-path>" + "/" + "<name>" + NUL. */
    len = strlen(name) + strlen(parent_cap->path) + 2;
    NV_KMALLOC(cap->path, len);
    if (cap->path == NULL)
    {
        goto free_cap;
    }

    strcpy(cap->path, parent_cap->path);
    strcat(cap->path, "/");
    strcat(cap->path, name);

    len = strlen(name) + 1;
    NV_KMALLOC(cap->name, len);
    if (cap->name == NULL)
    {
        goto free_path;
    }

    strcpy(cap->name, name);

    cap->minor = -1;
    cap->modify = NVreg_ModifyDeviceFiles;

    return cap;

free_path:
    NV_KFREE(cap->path, strlen(cap->path) + 1);
free_cap:
    NV_KFREE(cap, sizeof(nv_cap_t));
    return NULL;
}
|
||||
|
||||
/*
 * Release an nv_cap_t produced by nv_cap_alloc(), including its owned
 * path and name strings.  A NULL 'cap' is a no-op.
 */
static void nv_cap_free(nv_cap_t *cap)
{
    if (cap != NULL)
    {
        NV_KFREE(cap->path, strlen(cap->path) + 1);
        NV_KFREE(cap->name, strlen(cap->name) + 1);
        NV_KFREE(cap, sizeof(nv_cap_t));
    }
}
|
||||
|
||||
/*
 * Create a capability proc *file* under 'parent_cap'.
 *
 * 'mode' is the mode the device node should get — it is stored in
 * cap->permissions and reported to userspace; the proc entry itself is
 * always created world-readable (S_IFREG | S_IRUGO).  The capability
 * path must map to a pre-assigned minor (nv_cap_find_minor), otherwise
 * creation fails.  Returns the new nv_cap_t, or NULL on failure.
 */
nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap,
                                               const char *name, int mode)
{
    nv_cap_t *cap = NULL;
    int minor;

    cap = nv_cap_alloc(parent_cap, name);
    if (cap == NULL)
    {
        return NULL;
    }

    cap->parent = parent_cap->entry;
    cap->permissions = mode;

    /* Reuse 'mode' for the proc entry mode, not the device mode. */
    mode = (S_IFREG | S_IRUGO);

    minor = nv_cap_find_minor(cap->path);
    if (minor < 0)
    {
        nv_cap_free(cap);
        return NULL;
    }

    cap->minor = minor;

    cap->entry = proc_create_data(name, mode, parent_cap->entry,
                                  &g_nv_cap_procfs_fops, (void*)cap);
    if (cap->entry == NULL)
    {
        nv_cap_free(cap);
        return NULL;
    }

    return cap;
}
|
||||
|
||||
/*
 * Create a capability proc *directory* under 'parent_cap'.
 *
 * As with file entries, 'mode' is recorded in cap->permissions while
 * the proc directory itself is created with fixed world-read/execute
 * permissions.  Directories carry no device minor (minor == -1).
 * Returns the new nv_cap_t, or NULL on failure.
 */
nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap,
                                              const char *name, int mode)
{
    nv_cap_t *cap = NULL;

    cap = nv_cap_alloc(parent_cap, name);
    if (cap == NULL)
    {
        return NULL;
    }

    cap->parent = parent_cap->entry;
    cap->permissions = mode;
    cap->minor = -1;

    /* Reuse 'mode' for the proc entry mode, not the device mode. */
    mode = (S_IFDIR | S_IRUGO | S_IXUGO);

    cap->entry = NV_PROC_MKDIR_MODE(name, mode, parent_cap->entry);
    if (cap->entry == NULL)
    {
        nv_cap_free(cap);
        return NULL;
    }

    return cap;
}
|
||||
|
||||
/*
 * Create the root "<path>/capabilities" proc directory for a driver
 * component and return its capability handle.
 *
 * Uses a stack-allocated pseudo-parent with empty path/name and a NULL
 * procfs entry, so the directory is created relative to the procfs
 * root (only the entry/path fields of the parent are consumed by
 * nv_cap_create_dir_entry/nv_cap_alloc).
 */
nv_cap_t* NV_API_CALL nv_cap_init(const char *path)
{
    nv_cap_t parent_cap;
    nv_cap_t *cap;
    int mode;
    char *name = NULL;
    char dir[] = "/capabilities";

    if (path == NULL)
    {
        return NULL;
    }

    /* "<path>" + "/capabilities" + NUL. */
    NV_KMALLOC(name, (strlen(path) + strlen(dir)) + 1);
    if (name == NULL)
    {
        return NULL;
    }

    strcpy(name, path);
    strcat(name, dir);
    parent_cap.entry = NULL;
    parent_cap.path = "";
    parent_cap.name = "";
    mode = S_IRUGO | S_IXUGO;
    cap = nv_cap_create_dir_entry(&parent_cap, name, mode);

    NV_KFREE(name, strlen(name) + 1);
    return cap;
}
|
||||
|
||||
/*
 * Remove a capability's proc entry and free its handle.  NULL input is
 * a programming error (warned) and otherwise ignored.
 */
void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap)
{
    if (WARN_ON(cap == NULL))
    {
        return;
    }

    remove_proc_entry(cap->name, cap->parent);
    nv_cap_free(cap);
}
|
||||
|
||||
/*
 * Open handler for the nvidia-caps chardev.  Intentionally empty: an fd
 * on this device acts purely as a capability token; validation happens
 * in nv_cap_validate_and_dup_fd() against f_op and the minor number.
 */
static int nv_cap_drv_open(struct inode *inode, struct file *file)
{
    return 0;
}
|
||||
|
||||
/* Release handler for the nvidia-caps chardev; no per-open state to free. */
static int nv_cap_drv_release(struct inode *inode, struct file *file)
{
    return 0;
}
|
||||
|
||||
/*
 * File operations of the nvidia-caps chardev.  No read/write/ioctl:
 * fds exist only as capability tokens, identified elsewhere by comparing
 * file->f_op against this table.
 */
static struct file_operations g_nv_cap_drv_fops =
{
    .owner = THIS_MODULE,
    .open = nv_cap_drv_open,
    .release = nv_cap_drv_release
};
|
||||
|
||||
/*
 * Initialize the nvidia-caps driver: build the capability tables and
 * hash, allocate a chrdev minor range, register the cdev, and create
 * the procfs listing files.  Returns 0 or a negative errno; failures
 * are unwound in reverse order via the goto labels.
 *
 * NOTE(review): nv_cap_tables_init() runs before the double-init guard,
 * so a second (rejected) call would still re-add hash entries — relies
 * on callers never invoking this twice.
 */
int NV_API_CALL nv_cap_drv_init(void)
{
    int rc;

    nv_cap_tables_init();

    if (g_nv_cap_drv.initialized)
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps-drv is already initialized.\n");
        return -EBUSY;
    }

    rc = alloc_chrdev_region(&g_nv_cap_drv.devno,
                             0,
                             NV_CAP_DRV_MINOR_COUNT,
                             "nvidia-caps");
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev region.\n");
        return rc;
    }

    cdev_init(&g_nv_cap_drv.cdev, &g_nv_cap_drv_fops);

    g_nv_cap_drv.cdev.owner = THIS_MODULE;

    rc = cdev_add(&g_nv_cap_drv.cdev, g_nv_cap_drv.devno,
                  NV_CAP_DRV_MINOR_COUNT);
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev.\n");
        goto cdev_add_fail;
    }

    rc = nv_cap_procfs_init();
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps-drv: unable to init proc\n");
        goto proc_init_fail;
    }

    g_nv_cap_drv.initialized = NV_TRUE;

    return 0;

proc_init_fail:
    cdev_del(&g_nv_cap_drv.cdev);

cdev_add_fail:
    unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT);

    return rc;
}
|
||||
|
||||
/*
 * Tear down everything nv_cap_drv_init() set up, in reverse order.
 * Safe no-op when init never completed.
 */
void NV_API_CALL nv_cap_drv_exit(void)
{
    if (!g_nv_cap_drv.initialized)
    {
        return;
    }

    nv_cap_procfs_exit();

    cdev_del(&g_nv_cap_drv.cdev);

    unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT);

    g_nv_cap_drv.initialized = NV_FALSE;
}
|
||||
217
kernel-open/nvidia/nv-cray.c
Normal file
217
kernel-open/nvidia/nv-cray.c
Normal file
@@ -0,0 +1,217 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
|
||||
#if defined(CONFIG_CRAY_XT)
|
||||
enum {
|
||||
NV_FORMAT_STATE_ORDINARY,
|
||||
NV_FORMAT_STATE_INTRODUCTION,
|
||||
NV_FORMAT_STATE_FLAGS,
|
||||
NV_FORMAT_STATE_FIELD_WIDTH,
|
||||
NV_FORMAT_STATE_PRECISION,
|
||||
NV_FORMAT_STATE_LENGTH_MODIFIER,
|
||||
NV_FORMAT_STATE_CONVERSION_SPECIFIER
|
||||
};
|
||||
|
||||
enum {
|
||||
NV_LENGTH_MODIFIER_NONE,
|
||||
NV_LENGTH_MODIFIER_CHAR,
|
||||
NV_LENGTH_MODIFIER_SHORT_INT,
|
||||
NV_LENGTH_MODIFIER_LONG_INT,
|
||||
NV_LENGTH_MODIFIER_LONG_LONG_INT
|
||||
};
|
||||
|
||||
#define NV_IS_FLAG(c) \
|
||||
((c) == '#' || (c) == '0' || (c) == '-' || (c) == ' ' || (c) == '+')
|
||||
#define NV_IS_LENGTH_MODIFIER(c) \
|
||||
((c) == 'h' || (c) == 'l' || (c) == 'L' || (c) == 'q' || (c) == 'j' || \
|
||||
(c) == 'z' || (c) == 't')
|
||||
#define NV_IS_CONVERSION_SPECIFIER(c) \
|
||||
((c) == 'd' || (c) == 'i' || (c) == 'o' || (c) == 'u' || (c) == 'x' || \
|
||||
(c) == 'X' || (c) == 'e' || (c) == 'E' || (c) == 'f' || (c) == 'F' || \
|
||||
(c) == 'g' || (c) == 'G' || (c) == 'a' || (c) == 'A' || (c) == 'c' || \
|
||||
(c) == 's' || (c) == 'p')
|
||||
|
||||
#define NV_MAX_NUM_INFO_MMRS 6
|
||||
|
||||
NV_STATUS nvos_forward_error_to_cray(
|
||||
struct pci_dev *dev,
|
||||
NvU32 error_number,
|
||||
const char *format,
|
||||
va_list ap
|
||||
)
|
||||
{
|
||||
NvU32 num_info_mmrs;
|
||||
NvU64 x = 0, info_mmrs[NV_MAX_NUM_INFO_MMRS];
|
||||
int state = NV_FORMAT_STATE_ORDINARY;
|
||||
int modifier = NV_LENGTH_MODIFIER_NONE;
|
||||
NvU32 i, n = 0, m = 0;
|
||||
|
||||
memset(info_mmrs, 0, sizeof(info_mmrs));
|
||||
while (*format != '\0')
|
||||
{
|
||||
switch (state)
|
||||
{
|
||||
case NV_FORMAT_STATE_ORDINARY:
|
||||
if (*format == '%')
|
||||
state = NV_FORMAT_STATE_INTRODUCTION;
|
||||
break;
|
||||
case NV_FORMAT_STATE_INTRODUCTION:
|
||||
if (*format == '%')
|
||||
{
|
||||
state = NV_FORMAT_STATE_ORDINARY;
|
||||
break;
|
||||
}
|
||||
case NV_FORMAT_STATE_FLAGS:
|
||||
if (NV_IS_FLAG(*format))
|
||||
{
|
||||
state = NV_FORMAT_STATE_FLAGS;
|
||||
break;
|
||||
}
|
||||
else if (*format == '*')
|
||||
{
|
||||
state = NV_FORMAT_STATE_FIELD_WIDTH;
|
||||
break;
|
||||
}
|
||||
case NV_FORMAT_STATE_FIELD_WIDTH:
|
||||
if ((*format >= '0') && (*format <= '9'))
|
||||
{
|
||||
state = NV_FORMAT_STATE_FIELD_WIDTH;
|
||||
break;
|
||||
}
|
||||
else if (*format == '.')
|
||||
{
|
||||
state = NV_FORMAT_STATE_PRECISION;
|
||||
break;
|
||||
}
|
||||
case NV_FORMAT_STATE_PRECISION:
|
||||
if ((*format >= '0') && (*format <= '9'))
|
||||
{
|
||||
state = NV_FORMAT_STATE_PRECISION;
|
||||
break;
|
||||
}
|
||||
else if (NV_IS_LENGTH_MODIFIER(*format))
|
||||
{
|
||||
state = NV_FORMAT_STATE_LENGTH_MODIFIER;
|
||||
break;
|
||||
}
|
||||
else if (NV_IS_CONVERSION_SPECIFIER(*format))
|
||||
{
|
||||
state = NV_FORMAT_STATE_CONVERSION_SPECIFIER;
|
||||
break;
|
||||
}
|
||||
case NV_FORMAT_STATE_LENGTH_MODIFIER:
|
||||
if ((*format == 'h') || (*format == 'l'))
|
||||
{
|
||||
state = NV_FORMAT_STATE_LENGTH_MODIFIER;
|
||||
break;
|
||||
}
|
||||
else if (NV_IS_CONVERSION_SPECIFIER(*format))
|
||||
{
|
||||
state = NV_FORMAT_STATE_CONVERSION_SPECIFIER;
|
||||
break;
|
||||
}
|
||||
}
|
||||
switch (state)
|
||||
{
|
||||
case NV_FORMAT_STATE_INTRODUCTION:
|
||||
modifier = NV_LENGTH_MODIFIER_NONE;
|
||||
break;
|
||||
case NV_FORMAT_STATE_LENGTH_MODIFIER:
|
||||
switch (*format)
|
||||
{
|
||||
case 'h':
|
||||
modifier = (modifier == NV_LENGTH_MODIFIER_NONE)
|
||||
? NV_LENGTH_MODIFIER_SHORT_INT
|
||||
: NV_LENGTH_MODIFIER_CHAR;
|
||||
break;
|
||||
case 'l':
|
||||
modifier = (modifier == NV_LENGTH_MODIFIER_NONE)
|
||||
? NV_LENGTH_MODIFIER_LONG_INT
|
||||
: NV_LENGTH_MODIFIER_LONG_LONG_INT;
|
||||
break;
|
||||
case 'q':
|
||||
modifier = NV_LENGTH_MODIFIER_LONG_LONG_INT;
|
||||
default:
|
||||
return NV_ERR_INVALID_ARGUMENT;
|
||||
}
|
||||
break;
|
||||
case NV_FORMAT_STATE_CONVERSION_SPECIFIER:
|
||||
switch (*format)
|
||||
{
|
||||
case 'c':
|
||||
case 'd':
|
||||
case 'i':
|
||||
x = (unsigned int)va_arg(ap, int);
|
||||
break;
|
||||
case 'o':
|
||||
case 'u':
|
||||
case 'x':
|
||||
case 'X':
|
||||
switch (modifier)
|
||||
{
|
||||
case NV_LENGTH_MODIFIER_LONG_LONG_INT:
|
||||
x = va_arg(ap, unsigned long long int);
|
||||
break;
|
||||
case NV_LENGTH_MODIFIER_LONG_INT:
|
||||
x = va_arg(ap, unsigned long int);
|
||||
break;
|
||||
case NV_LENGTH_MODIFIER_CHAR:
|
||||
case NV_LENGTH_MODIFIER_SHORT_INT:
|
||||
case NV_LENGTH_MODIFIER_NONE:
|
||||
x = va_arg(ap, unsigned int);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return NV_ERR_INVALID_ARGUMENT;
|
||||
}
|
||||
state = NV_FORMAT_STATE_ORDINARY;
|
||||
for (i = 0; i < ((modifier == NV_LENGTH_MODIFIER_LONG_LONG_INT)
|
||||
? 2 : 1); i++)
|
||||
{
|
||||
if (m == NV_MAX_NUM_INFO_MMRS)
|
||||
return NV_ERR_INSUFFICIENT_RESOURCES;
|
||||
info_mmrs[m] = ((info_mmrs[m] << 32) | (x & 0xffffffff));
|
||||
x >>= 32;
|
||||
if (++n == 2)
|
||||
{
|
||||
m++;
|
||||
n = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
format++;
|
||||
}
|
||||
|
||||
num_info_mmrs = (m + (n != 0));
|
||||
if (num_info_mmrs > 0)
|
||||
cray_nvidia_report_error(dev, error_number, num_info_mmrs, info_mmrs);
|
||||
|
||||
return NV_OK;
|
||||
}
|
||||
#endif
|
||||
1305
kernel-open/nvidia/nv-dma.c
Normal file
1305
kernel-open/nvidia/nv-dma.c
Normal file
File diff suppressed because it is too large
Load Diff
896
kernel-open/nvidia/nv-dmabuf.c
Normal file
896
kernel-open/nvidia/nv-dmabuf.c
Normal file
@@ -0,0 +1,896 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#include <linux/dma-buf.h>
|
||||
#include "nv-dmabuf.h"
|
||||
|
||||
|
||||
|
||||
#if defined(CONFIG_DMA_SHARED_BUFFER)
|
||||
/*
 * One RM memory handle exported through a dma-buf, plus its BAR1
 * mapping (valid while mapped; see nv_dma_buf_map/unmap paths).
 */
typedef struct nv_dma_buf_mem_handle
{
    NvHandle h_memory; /* duped RM memory handle; 0 if slot is empty */
    NvU64 offset;      /* offset into the memory object */
    NvU64 size;        /* size of the exported range, in bytes */
    NvU64 bar1_va;     /* BAR1 address of the mapped range */
} nv_dma_buf_mem_handle_t;
|
||||
|
||||
/*
 * Private state of an exported dma-buf: the owning GPU, the RM client
 * hierarchy holding the duped handles, and the array of exported
 * memory ranges.  'lock' serializes attach/map operations.
 */
typedef struct nv_dma_buf_file_private
{
    nv_state_t *nv;                    /* GPU this buffer belongs to */
    NvHandle h_client;                 /* RM client owning the duped handles */
    NvHandle h_device;                 /* RM device under h_client */
    NvHandle h_subdevice;              /* RM subdevice under h_device */
    NvU32 total_objects;               /* capacity of 'handles' */
    NvU32 num_objects;                 /* handles duped so far */
    NvU64 total_size;                  /* declared total size of the buffer */
    NvU64 attached_size;               /* bytes covered by duped handles */
    struct mutex lock;                 /* guards handle/map bookkeeping */
    nv_dma_buf_mem_handle_t *handles;  /* per-object handle array */
    NvU64 bar1_va_ref_count;           /* outstanding BAR1 mappings */
    void *mig_info;                    /* opaque MIG partition info, if any */
} nv_dma_buf_file_private_t;
|
||||
|
||||
/*
 * Free a dma-buf private struct and its handle array.  NULL is a no-op.
 * Only releases local memory — any duped RM handles must already have
 * been undone by the caller.
 */
static void
nv_dma_buf_free_file_private(
    nv_dma_buf_file_private_t *priv
)
{
    if (priv == NULL)
    {
        return;
    }

    if (priv->handles != NULL)
    {
        NV_KFREE(priv->handles, priv->total_objects * sizeof(priv->handles[0]));
        priv->handles = NULL;
    }

    mutex_destroy(&priv->lock);

    NV_KFREE(priv, sizeof(nv_dma_buf_file_private_t));
}
|
||||
|
||||
/*
 * Allocate and zero-initialize a dma-buf private struct with room for
 * 'num_handles' memory handles.  Returns NULL on allocation failure
 * (partial allocation is unwound).
 *
 * NOTE(review): total_objects is still 0 when the failure path frees
 * 'handles' — harmless here only because handles is freed directly and
 * the size passed to NV_KFREE in the free helper is unused at that point;
 * confirm NV_KFREE ignores a zero size.
 */
static nv_dma_buf_file_private_t*
nv_dma_buf_alloc_file_private(
    NvU32 num_handles
)
{
    nv_dma_buf_file_private_t *priv = NULL;

    NV_KMALLOC(priv, sizeof(nv_dma_buf_file_private_t));
    if (priv == NULL)
    {
        return NULL;
    }

    memset(priv, 0, sizeof(nv_dma_buf_file_private_t));

    mutex_init(&priv->lock);

    NV_KMALLOC(priv->handles, num_handles * sizeof(priv->handles[0]));
    if (priv->handles == NULL)
    {
        goto failed;
    }

    memset(priv->handles, 0, num_handles * sizeof(priv->handles[0]));

    return priv;

failed:
    nv_dma_buf_free_file_private(priv);

    return NULL;
}
|
||||
|
||||
// Must be called with RMAPI lock and GPU lock taken
|
||||
// Must be called with RMAPI lock and GPU lock taken
/*
 * Undo the RM handle duplication for handle slots [index, num_objects),
 * clearing each slot and shrinking the attached-size / object-count
 * bookkeeping.  Empty slots (h_memory == 0) are skipped, so this is
 * safe on a partially populated range.
 */
static void
nv_dma_buf_undup_mem_handles_unlocked(
    nvidia_stack_t *sp,
    NvU32 index,
    NvU32 num_objects,
    nv_dma_buf_file_private_t *priv
)
{
    NvU32 i = 0;

    for (i = index; i < num_objects; i++)
    {
        if (priv->handles[i].h_memory == 0)
        {
            continue;
        }

        rm_dma_buf_undup_mem_handle(sp, priv->nv, priv->h_client,
                                    priv->handles[i].h_memory);

        priv->attached_size -= priv->handles[i].size;
        priv->handles[i].h_memory = 0;
        priv->handles[i].offset = 0;
        priv->handles[i].size = 0;
        priv->num_objects--;
    }
}
|
||||
|
||||
/*
 * Locked wrapper around nv_dma_buf_undup_mem_handles_unlocked():
 * acquires the RM API lock and all-GPUs lock first (required by the
 * unlocked helper), releasing them in reverse order.  Lock-acquisition
 * failure here is unexpected (WARN_ON) and leaves the handles duped.
 */
static void
nv_dma_buf_undup_mem_handles(
    nvidia_stack_t *sp,
    NvU32 index,
    NvU32 num_objects,
    nv_dma_buf_file_private_t *priv
)
{
    NV_STATUS status;

    status = rm_acquire_api_lock(sp);
    if (WARN_ON(status != NV_OK))
    {
        return;
    }

    status = rm_acquire_all_gpus_lock(sp);
    if (WARN_ON(status != NV_OK))
    {
        goto unlock_api_lock;
    }

    nv_dma_buf_undup_mem_handles_unlocked(sp, index, num_objects, priv);

    rm_release_all_gpus_lock(sp);

unlock_api_lock:
    rm_release_api_lock(sp);
}
|
||||
|
||||
/*
 * Duplicate the caller's memory handles into the dma-buf's own RM
 * client, filling priv->handles starting at params->index.
 *
 * Runs under the RM API lock and this GPU's lock.  Validates that each
 * target slot is free and that each size still fits within the declared
 * total; once the last object is attached, the accumulated size must
 * match total_size exactly.  On any failure, the handles duped by this
 * call (and only those) are undone before returning.
 */
static NV_STATUS
nv_dma_buf_dup_mem_handles(
    nvidia_stack_t *sp,
    nv_dma_buf_file_private_t *priv,
    nv_ioctl_export_to_dma_buf_fd_t *params
)
{
    NV_STATUS status = NV_OK;
    NvU32 index = params->index;
    NvU32 count = 0;
    NvU32 i = 0;

    status = rm_acquire_api_lock(sp);
    if (status != NV_OK)
    {
        return status;
    }

    status = rm_acquire_gpu_lock(sp, priv->nv);
    if (status != NV_OK)
    {
        goto unlock_api_lock;
    }

    for (i = 0; i < params->numObjects; i++)
    {
        NvHandle h_memory_duped = 0;

        /* Slot already populated by a previous attach call. */
        if (priv->handles[index].h_memory != 0)
        {
            status = NV_ERR_IN_USE;
            goto failed;
        }

        /* Reject sizes that would exceed the declared total. */
        if (params->sizes[i] > priv->total_size - priv->attached_size)
        {
            status = NV_ERR_INVALID_ARGUMENT;
            goto failed;
        }

        status = rm_dma_buf_dup_mem_handle(sp, priv->nv,
                                           params->hClient,
                                           priv->h_client,
                                           priv->h_device,
                                           priv->h_subdevice,
                                           priv->mig_info,
                                           params->handles[i],
                                           params->offsets[i],
                                           params->sizes[i],
                                           &h_memory_duped);
        if (status != NV_OK)
        {
            goto failed;
        }

        priv->attached_size += params->sizes[i];
        priv->handles[index].h_memory = h_memory_duped;
        priv->handles[index].offset = params->offsets[i];
        priv->handles[index].size = params->sizes[i];
        priv->num_objects++;
        index++;
        count++;
    }

    /* Final attach must account for every byte of the buffer. */
    if ((priv->num_objects == priv->total_objects) &&
        (priv->attached_size != priv->total_size))
    {
        status = NV_ERR_INVALID_ARGUMENT;
        goto failed;
    }

    rm_release_gpu_lock(sp, priv->nv);

    rm_release_api_lock(sp);

    return NV_OK;

failed:
    /* Undo only the handles duped in this call: [params->index, +count). */
    nv_dma_buf_undup_mem_handles_unlocked(sp, params->index, count, priv);

    rm_release_gpu_lock(sp, priv->nv);

unlock_api_lock:
    rm_release_api_lock(sp);

    return status;
}
|
||||
|
||||
// Must be called with RMAPI lock and GPU lock taken
|
||||
// Must be called with RMAPI lock and GPU lock taken
/*
 * Unmap the first 'count' scatterlist entries of an exported dma-buf:
 * drops each peer DMA mapping and, when this caller holds the last
 * BAR1 reference (bar1_va_ref_count == 0), also tears down the BAR1
 * mapping of the corresponding memory handle.  One sg entry maps to
 * one handle (RM currently hands back contiguous BAR1 per handle).
 */
static void
nv_dma_buf_unmap_unlocked(
    nvidia_stack_t *sp,
    nv_dma_device_t *peer_dma_dev,
    nv_dma_buf_file_private_t *priv,
    struct sg_table *sgt,
    NvU32 count
)
{
    NV_STATUS status;
    NvU32 i;
    NvU64 dma_len;
    NvU64 dma_addr;
    NvU64 bar1_va;
    NvBool bar1_unmap_needed;
    struct scatterlist *sg = NULL;

    bar1_unmap_needed = (priv->bar1_va_ref_count == 0);

    for_each_sg(sgt->sgl, sg, count, i)
    {
        dma_addr = sg_dma_address(sg);
        dma_len = priv->handles[i].size;
        bar1_va = priv->handles[i].bar1_va;

        /* sg length is expected to mirror the handle's size exactly. */
        WARN_ON(sg_dma_len(sg) != priv->handles[i].size);

        nv_dma_unmap_peer(peer_dma_dev, (dma_len / os_page_size), dma_addr);

        if (bar1_unmap_needed)
        {
            status = rm_dma_buf_unmap_mem_handle(sp, priv->nv, priv->h_client,
                                                 priv->handles[i].h_memory,
                                                 priv->handles[i].size,
                                                 priv->handles[i].bar1_va);
            WARN_ON(status != NV_OK);
        }
    }
}
|
||||
|
||||
static struct sg_table*
|
||||
nv_dma_buf_map(
|
||||
struct dma_buf_attachment *attachment,
|
||||
enum dma_data_direction direction
|
||||
)
|
||||
{
|
||||
NV_STATUS status;
|
||||
nvidia_stack_t *sp = NULL;
|
||||
struct scatterlist *sg = NULL;
|
||||
struct sg_table *sgt = NULL;
|
||||
struct dma_buf *buf = attachment->dmabuf;
|
||||
struct device *dev = attachment->dev;
|
||||
nv_dma_buf_file_private_t *priv = buf->priv;
|
||||
nv_dma_device_t peer_dma_dev = {{ 0 }};
|
||||
NvBool bar1_map_needed;
|
||||
NvBool bar1_unmap_needed;
|
||||
NvU32 count = 0;
|
||||
NvU32 i = 0;
|
||||
int rc = 0;
|
||||
|
||||
//
|
||||
// We support importers that are able to handle MMIO resources
|
||||
// not backed by struct page. This will need to be revisited
|
||||
// when dma-buf support for P9 will be added.
|
||||
//
|
||||
#if defined(NV_DMA_BUF_HAS_DYNAMIC_ATTACHMENT) && \
|
||||
defined(NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER)
|
||||
if (dma_buf_attachment_is_dynamic(attachment) &&
|
||||
!attachment->peer2peer)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: failed to map dynamic attachment with no P2P support\n");
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
mutex_lock(&priv->lock);
|
||||
|
||||
if (priv->num_objects != priv->total_objects)
|
||||
{
|
||||
goto unlock_priv;
|
||||
}
|
||||
|
||||
rc = nv_kmem_cache_alloc_stack(&sp);
|
||||
if (rc != 0)
|
||||
{
|
||||
goto unlock_priv;
|
||||
}
|
||||
|
||||
status = rm_acquire_api_lock(sp);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto free_sp;
|
||||
}
|
||||
|
||||
status = rm_acquire_gpu_lock(sp, priv->nv);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto unlock_api_lock;
|
||||
}
|
||||
|
||||
NV_KMALLOC(sgt, sizeof(struct sg_table));
|
||||
if (sgt == NULL)
|
||||
{
|
||||
goto unlock_gpu_lock;
|
||||
}
|
||||
|
||||
memset(sgt, 0, sizeof(struct sg_table));
|
||||
|
||||
//
|
||||
// RM currently returns contiguous BAR1, so we create as many
|
||||
// sg entries as the number of handles being mapped.
|
||||
// When RM can alloc discontiguous BAR1, this code will need to be revisited.
|
||||
//
|
||||
rc = sg_alloc_table(sgt, priv->num_objects, GFP_KERNEL);
|
||||
if (rc != 0)
|
||||
{
|
||||
goto free_sgt;
|
||||
}
|
||||
|
||||
peer_dma_dev.dev = dev;
|
||||
peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask;
|
||||
bar1_map_needed = bar1_unmap_needed = (priv->bar1_va_ref_count == 0);
|
||||
|
||||
for_each_sg(sgt->sgl, sg, priv->num_objects, i)
|
||||
{
|
||||
NvU64 dma_addr;
|
||||
NvU64 dma_len;
|
||||
|
||||
if (bar1_map_needed)
|
||||
{
|
||||
status = rm_dma_buf_map_mem_handle(sp, priv->nv, priv->h_client,
|
||||
priv->handles[i].h_memory,
|
||||
priv->handles[i].offset,
|
||||
priv->handles[i].size,
|
||||
&priv->handles[i].bar1_va);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto unmap_handles;
|
||||
}
|
||||
}
|
||||
|
||||
dma_addr = priv->handles[i].bar1_va;
|
||||
dma_len = priv->handles[i].size;
|
||||
|
||||
status = nv_dma_map_peer(&peer_dma_dev, priv->nv->dma_dev,
|
||||
0x1, (dma_len / os_page_size), &dma_addr);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
if (bar1_unmap_needed)
|
||||
{
|
||||
// Unmap the recently mapped memory handle
|
||||
(void) rm_dma_buf_unmap_mem_handle(sp, priv->nv, priv->h_client,
|
||||
priv->handles[i].h_memory,
|
||||
priv->handles[i].size,
|
||||
priv->handles[i].bar1_va);
|
||||
}
|
||||
|
||||
// Unmap remaining memory handles
|
||||
goto unmap_handles;
|
||||
}
|
||||
|
||||
sg_set_page(sg, NULL, dma_len, 0);
|
||||
sg_dma_address(sg) = (dma_addr_t)dma_addr;
|
||||
sg_dma_len(sg) = dma_len;
|
||||
count++;
|
||||
}
|
||||
|
||||
priv->bar1_va_ref_count++;
|
||||
|
||||
rm_release_gpu_lock(sp, priv->nv);
|
||||
|
||||
rm_release_api_lock(sp);
|
||||
|
||||
nv_kmem_cache_free_stack(sp);
|
||||
|
||||
mutex_unlock(&priv->lock);
|
||||
|
||||
return sgt;
|
||||
|
||||
unmap_handles:
|
||||
nv_dma_buf_unmap_unlocked(sp, &peer_dma_dev, priv, sgt, count);
|
||||
|
||||
sg_free_table(sgt);
|
||||
|
||||
free_sgt:
|
||||
NV_KFREE(sgt, sizeof(struct sg_table));
|
||||
|
||||
unlock_gpu_lock:
|
||||
rm_release_gpu_lock(sp, priv->nv);
|
||||
|
||||
unlock_api_lock:
|
||||
rm_release_api_lock(sp);
|
||||
|
||||
free_sp:
|
||||
nv_kmem_cache_free_stack(sp);
|
||||
|
||||
unlock_priv:
|
||||
mutex_unlock(&priv->lock);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
nv_dma_buf_unmap(
|
||||
struct dma_buf_attachment *attachment,
|
||||
struct sg_table *sgt,
|
||||
enum dma_data_direction direction
|
||||
)
|
||||
{
|
||||
NV_STATUS status;
|
||||
struct dma_buf *buf = attachment->dmabuf;
|
||||
struct device *dev = attachment->dev;
|
||||
nvidia_stack_t *sp = NULL;
|
||||
nv_dma_buf_file_private_t *priv = buf->priv;
|
||||
nv_dma_device_t peer_dma_dev = {{ 0 }};
|
||||
int rc = 0;
|
||||
|
||||
mutex_lock(&priv->lock);
|
||||
|
||||
if (priv->num_objects != priv->total_objects)
|
||||
{
|
||||
goto unlock_priv;
|
||||
}
|
||||
|
||||
rc = nv_kmem_cache_alloc_stack(&sp);
|
||||
if (WARN_ON(rc != 0))
|
||||
{
|
||||
goto unlock_priv;
|
||||
}
|
||||
|
||||
status = rm_acquire_api_lock(sp);
|
||||
if (WARN_ON(status != NV_OK))
|
||||
{
|
||||
goto free_sp;
|
||||
}
|
||||
|
||||
status = rm_acquire_gpu_lock(sp, priv->nv);
|
||||
if (WARN_ON(status != NV_OK))
|
||||
{
|
||||
goto unlock_api_lock;
|
||||
}
|
||||
|
||||
peer_dma_dev.dev = dev;
|
||||
peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask;
|
||||
|
||||
priv->bar1_va_ref_count--;
|
||||
|
||||
nv_dma_buf_unmap_unlocked(sp, &peer_dma_dev, priv, sgt, priv->num_objects);
|
||||
|
||||
sg_free_table(sgt);
|
||||
|
||||
NV_KFREE(sgt, sizeof(struct sg_table));
|
||||
|
||||
rm_release_gpu_lock(sp, priv->nv);
|
||||
|
||||
unlock_api_lock:
|
||||
rm_release_api_lock(sp);
|
||||
|
||||
free_sp:
|
||||
nv_kmem_cache_free_stack(sp);
|
||||
|
||||
unlock_priv:
|
||||
mutex_unlock(&priv->lock);
|
||||
}
|
||||
|
||||
//
// dma-buf release callback, invoked when the last reference to the exported
// dma-buf is dropped. Un-dups all RM memory handles, releases the RM
// client/device/subdevice acquired at creation, frees the private state,
// and drops the GPU reference taken in nv_dma_buf_create().
//
static void
nv_dma_buf_release(
    struct dma_buf *buf
)
{
    int rc = 0;
    nvidia_stack_t *sp = NULL;
    nv_dma_buf_file_private_t *priv = buf->priv;
    nv_state_t *nv;

    // Nothing to tear down if creation never attached private state.
    if (priv == NULL)
    {
        return;
    }

    // Cache nv: priv is freed below but nv is still needed for the dev put.
    nv = priv->nv;

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (WARN_ON(rc != 0))
    {
        // No stack available: the RM teardown calls cannot be made.
        return;
    }

    nv_dma_buf_undup_mem_handles(sp, 0, priv->num_objects, priv);

    rm_dma_buf_put_client_and_device(sp, priv->nv, priv->h_client, priv->h_device,
                                     priv->h_subdevice, priv->mig_info);

    nv_dma_buf_free_file_private(priv);
    buf->priv = NULL;

    nvidia_dev_put(nv->gpu_id, sp);

    nv_kmem_cache_free_stack(sp);

    return;
}
|
||||
|
||||
//
// dma-buf mmap callback stub: CPU mappings of the exported buffer are not
// supported; importers must use the DMA mapping path instead.
//
static int
nv_dma_buf_mmap(
    struct dma_buf *buf,
    struct vm_area_struct *vma
)
{
    return -ENOTSUPP;
}
|
||||
|
||||
#if defined(NV_DMA_BUF_OPS_HAS_KMAP) || \
|
||||
defined(NV_DMA_BUF_OPS_HAS_MAP)
|
||||
//
// kmap/map stub: kernel CPU mappings are not provided. Present only because
// some kernel versions require this callback for dma_buf_export() to succeed.
//
static void*
nv_dma_buf_kmap_stub(
    struct dma_buf *buf,
    unsigned long page_num
)
{
    return NULL;
}
|
||||
|
||||
//
// kunmap/unmap stub: paired with nv_dma_buf_kmap_stub, which never maps
// anything, so there is nothing to undo here.
//
static void
nv_dma_buf_kunmap_stub(
    struct dma_buf *buf,
    unsigned long page_num,
    void *addr
)
{
    return;
}
|
||||
#endif
|
||||
|
||||
#if defined(NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC) || \
|
||||
defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
|
||||
//
// kmap_atomic/map_atomic stub: atomic kernel CPU mappings are not provided;
// present only to satisfy kernels where the callback is mandatory.
//
static void*
nv_dma_buf_kmap_atomic_stub(
    struct dma_buf *buf,
    unsigned long page_num
)
{
    return NULL;
}
|
||||
|
||||
//
// kunmap_atomic/unmap_atomic stub: paired with the atomic kmap stub above,
// which never maps anything, so there is nothing to undo here.
//
static void
nv_dma_buf_kunmap_atomic_stub(
    struct dma_buf *buf,
    unsigned long page_num,
    void *addr
)
{
    return;
}
|
||||
#endif
|
||||
|
||||
//
|
||||
// Note: Some of the dma-buf operations are mandatory in some kernels.
|
||||
// So stubs are added to prevent dma_buf_export() failure.
|
||||
// The actual implementations of these interfaces is not really required
|
||||
// for the export operation to work.
|
||||
//
|
||||
// Same functions are used for kmap*/map* because of this commit:
|
||||
// f9b67f0014cb: dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic
|
||||
//
|
||||
// dma-buf exporter callback table for RM-backed memory.
// map_dma_buf/unmap_dma_buf carry the real functionality; mmap and the
// kmap-family entries are stubs (see the note above). Which kmap/map fields
// exist depends on the kernel version, hence the conftest-driven #ifs.
static const struct dma_buf_ops nv_dma_buf_ops = {
    .map_dma_buf   = nv_dma_buf_map,
    .unmap_dma_buf = nv_dma_buf_unmap,
    .release       = nv_dma_buf_release,
    .mmap          = nv_dma_buf_mmap,
#if defined(NV_DMA_BUF_OPS_HAS_KMAP)
    .kmap          = nv_dma_buf_kmap_stub,
    .kunmap        = nv_dma_buf_kunmap_stub,
#endif
#if defined(NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC)
    .kmap_atomic   = nv_dma_buf_kmap_atomic_stub,
    .kunmap_atomic = nv_dma_buf_kunmap_atomic_stub,
#endif
#if defined(NV_DMA_BUF_OPS_HAS_MAP)
    .map           = nv_dma_buf_kmap_stub,
    .unmap         = nv_dma_buf_kunmap_stub,
#endif
#if defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
    .map_atomic    = nv_dma_buf_kmap_atomic_stub,
    .unmap_atomic  = nv_dma_buf_kunmap_atomic_stub,
#endif
};
|
||||
|
||||
//
// Create and export a new dma-buf covering params->totalObjects RM memory
// handles (the first params->numObjects of which are supplied now; the rest
// may be added later via nv_dma_buf_reuse). Acquires a GPU reference and an
// RM client/device/subdevice that live as long as the dma-buf; on success
// the new fd is returned in params->fd and cleanup moves to the release
// callback. Returns NV_OK or an NV_ERR_* code.
//
static NV_STATUS
nv_dma_buf_create(
    nv_state_t *nv,
    nv_ioctl_export_to_dma_buf_fd_t *params
)
{
    int rc = 0;
    NV_STATUS status;
    nvidia_stack_t *sp = NULL;
    struct dma_buf *buf = NULL;
    nv_dma_buf_file_private_t *priv = NULL;
    NvU32 gpu_id = nv->gpu_id;

    if (!nv->dma_buf_supported)
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    // The supplied handle range [index, index + numObjects) must fit within
    // totalObjects. (numObjects <= totalObjects was validated by the caller,
    // so the subtraction cannot underflow.)
    if (params->index > (params->totalObjects - params->numObjects))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    priv = nv_dma_buf_alloc_file_private(params->totalObjects);
    if (priv == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate dma-buf private\n");
        return NV_ERR_NO_MEMORY;
    }

    priv->total_objects = params->totalObjects;
    priv->total_size = params->totalSize;
    priv->nv = nv;

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        status = NV_ERR_NO_MEMORY;
        goto cleanup_priv;
    }

    // Hold a reference on the GPU so it cannot be torn down while the
    // dma-buf exists; dropped by the release callback (or on error below).
    rc = nvidia_dev_get(gpu_id, sp);
    if (rc != 0)
    {
        status = NV_ERR_OPERATING_SYSTEM;
        goto cleanup_sp;
    }

    status = rm_dma_buf_get_client_and_device(sp, priv->nv,
                                              params->hClient,
                                              &priv->h_client,
                                              &priv->h_device,
                                              &priv->h_subdevice,
                                              &priv->mig_info);
    if (status != NV_OK)
    {
        goto cleanup_device;
    }

    status = nv_dma_buf_dup_mem_handles(sp, priv, params);
    if (status != NV_OK)
    {
        goto cleanup_client_and_device;
    }

    // dma_buf_export()'s signature varies by kernel version (conftest-probed).
#if (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 1)
    {
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &nv_dma_buf_ops;
        exp_info.size = params->totalSize;
        exp_info.flags = O_RDWR | O_CLOEXEC;
        exp_info.priv = priv;

        buf = dma_buf_export(&exp_info);
    }
#elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 4)
    buf = dma_buf_export(priv, &nv_dma_buf_ops,
                         params->totalSize, O_RDWR | O_CLOEXEC);
#elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 5)
    buf = dma_buf_export(priv, &nv_dma_buf_ops,
                         params->totalSize, O_RDWR | O_CLOEXEC, NULL);
#endif

    if (IS_ERR(buf))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to create dma-buf\n");

        status = NV_ERR_OPERATING_SYSTEM;

        goto cleanup_handles;
    }

    // From here on, ownership of priv (handles, client/device, GPU ref)
    // belongs to the dma-buf and is reclaimed via nv_dma_buf_release().
    nv_kmem_cache_free_stack(sp);

    rc = dma_buf_fd(buf, O_RDWR | O_CLOEXEC);
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to get dma-buf file descriptor\n");

        //
        // If dma-buf is successfully created, the dup'd handles
        // clean-up should be done by the release callback.
        //
        dma_buf_put(buf);

        return NV_ERR_OPERATING_SYSTEM;
    }

    params->fd = rc;

    return NV_OK;

// Error unwind in reverse acquisition order; labels fall through intentionally.
cleanup_handles:
    nv_dma_buf_undup_mem_handles(sp, 0, priv->num_objects, priv);

cleanup_client_and_device:
    rm_dma_buf_put_client_and_device(sp, priv->nv, priv->h_client, priv->h_device,
                                     priv->h_subdevice, priv->mig_info);

cleanup_device:
    nvidia_dev_put(gpu_id, sp);

cleanup_sp:
    nv_kmem_cache_free_stack(sp);

cleanup_priv:
    nv_dma_buf_free_file_private(priv);

    return status;
}
|
||||
|
||||
//
// Attach additional RM memory handles to an already-exported dma-buf,
// identified by params->fd. Used when the total number of objects exceeds
// what a single export ioctl can carry. Returns NV_OK or an NV_ERR_* code.
//
static NV_STATUS
nv_dma_buf_reuse(
    nv_state_t *nv,
    nv_ioctl_export_to_dma_buf_fd_t *params
)
{
    int rc = 0;
    NV_STATUS status = NV_OK;
    nvidia_stack_t *sp = NULL;
    struct dma_buf *buf = NULL;
    nv_dma_buf_file_private_t *priv = NULL;

    // Takes a reference on the dma-buf; released at cleanup_dmabuf.
    buf = dma_buf_get(params->fd);
    if (IS_ERR(buf))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to get dma-buf\n");
        return NV_ERR_OPERATING_SYSTEM;
    }

    // NOTE(review): only NULL is rejected here; an fd referring to some
    // other exporter's dma-buf would have a foreign priv — presumably the
    // ioctl path guarantees the fd came from nv_dma_buf_create. Confirm.
    priv = buf->priv;

    if (priv == NULL)
    {
        status = NV_ERR_OPERATING_SYSTEM;
        goto cleanup_dmabuf;
    }

    rc = mutex_lock_interruptible(&priv->lock);
    if (rc != 0)
    {
        status = NV_ERR_OPERATING_SYSTEM;
        goto cleanup_dmabuf;
    }

    // The new handle range [index, index + numObjects) must fit within the
    // object count fixed at creation time.
    if (params->index > (priv->total_objects - params->numObjects))
    {
        status = NV_ERR_INVALID_ARGUMENT;
        goto unlock_priv;
    }

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        status = NV_ERR_NO_MEMORY;
        goto unlock_priv;
    }

    status = nv_dma_buf_dup_mem_handles(sp, priv, params);
    if (status != NV_OK)
    {
        // Falls to the same cleanup as success; 'status' carries the error.
        goto cleanup_sp;
    }

cleanup_sp:
    nv_kmem_cache_free_stack(sp);

unlock_priv:
    mutex_unlock(&priv->lock);

cleanup_dmabuf:
    dma_buf_put(buf);

    return status;
}
|
||||
#endif // CONFIG_DMA_SHARED_BUFFER
|
||||
|
||||
NV_STATUS
|
||||
nv_dma_buf_export(
|
||||
nv_state_t *nv,
|
||||
nv_ioctl_export_to_dma_buf_fd_t *params
|
||||
)
|
||||
{
|
||||
#if defined(CONFIG_DMA_SHARED_BUFFER)
|
||||
NV_STATUS status;
|
||||
|
||||
if ((params == NULL) ||
|
||||
(params->totalSize == 0) ||
|
||||
(params->numObjects == 0) ||
|
||||
(params->totalObjects == 0) ||
|
||||
(params->numObjects > NV_DMABUF_EXPORT_MAX_HANDLES) ||
|
||||
(params->numObjects > params->totalObjects))
|
||||
{
|
||||
return NV_ERR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
//
|
||||
// If fd >= 0, dma-buf already exists with this fd, so get dma-buf from fd.
|
||||
// If fd == -1, dma-buf is not created yet, so create it and then store
|
||||
// additional handles.
|
||||
//
|
||||
if (params->fd == -1)
|
||||
{
|
||||
status = nv_dma_buf_create(nv, params);
|
||||
}
|
||||
else if (params->fd >= 0)
|
||||
{
|
||||
status = nv_dma_buf_reuse(nv, params);
|
||||
}
|
||||
else
|
||||
{
|
||||
status = NV_ERR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
return status;
|
||||
#else
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
#endif // CONFIG_DMA_SHARED_BUFFER
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
412
kernel-open/nvidia/nv-frontend.c
Normal file
412
kernel-open/nvidia/nv-frontend.c
Normal file
@@ -0,0 +1,412 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
#include "nv-reg.h"
|
||||
#include "nv-frontend.h"
|
||||
|
||||
#if defined(MODULE_LICENSE)
|
||||
|
||||
MODULE_LICENSE("Dual MIT/GPL");
|
||||
|
||||
|
||||
|
||||
#endif
|
||||
#if defined(MODULE_INFO)
|
||||
MODULE_INFO(supported, "external");
|
||||
#endif
|
||||
#if defined(MODULE_VERSION)
|
||||
MODULE_VERSION(NV_VERSION_STRING);
|
||||
#endif
|
||||
|
||||
#ifdef MODULE_ALIAS_CHARDEV_MAJOR
|
||||
MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* MODULE_IMPORT_NS() is added by commit id 8651ec01daeda
|
||||
* ("module: add support for symbol namespaces") in 5.4
|
||||
*/
|
||||
#if defined(MODULE_IMPORT_NS)
|
||||
|
||||
|
||||
/*
|
||||
* DMA_BUF namespace is added by commit id 16b0314aa746
|
||||
* ("dma-buf: move dma-buf symbols into the DMA_BUF module namespace") in 5.16
|
||||
*/
|
||||
MODULE_IMPORT_NS(DMA_BUF);
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
static NvU32 nv_num_instances;
|
||||
|
||||
// lock required to protect table.
|
||||
struct semaphore nv_module_table_lock;
|
||||
|
||||
// minor number table
|
||||
nvidia_module_t *nv_minor_num_table[NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX + 1];
|
||||
|
||||
int nvidia_init_module(void);
|
||||
void nvidia_exit_module(void);
|
||||
|
||||
/* EXPORTS to Linux Kernel */
|
||||
|
||||
int nvidia_frontend_open(struct inode *, struct file *);
|
||||
int nvidia_frontend_close(struct inode *, struct file *);
|
||||
unsigned int nvidia_frontend_poll(struct file *, poll_table *);
|
||||
int nvidia_frontend_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
|
||||
long nvidia_frontend_unlocked_ioctl(struct file *, unsigned int, unsigned long);
|
||||
long nvidia_frontend_compat_ioctl(struct file *, unsigned int, unsigned long);
|
||||
int nvidia_frontend_mmap(struct file *, struct vm_area_struct *);
|
||||
|
||||
/* character driver entry points */
|
||||
static struct file_operations nv_frontend_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.poll = nvidia_frontend_poll,
|
||||
#if defined(NV_FILE_OPERATIONS_HAS_IOCTL)
|
||||
.ioctl = nvidia_frontend_ioctl,
|
||||
#endif
|
||||
.unlocked_ioctl = nvidia_frontend_unlocked_ioctl,
|
||||
#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
|
||||
.compat_ioctl = nvidia_frontend_compat_ioctl,
|
||||
#endif
|
||||
.mmap = nvidia_frontend_mmap,
|
||||
.open = nvidia_frontend_open,
|
||||
.release = nvidia_frontend_close,
|
||||
};
|
||||
|
||||
/* Helper functions */
|
||||
|
||||
// Assign free minor numbers to 'device' (and, when all == NV_TRUE, to every
// device in its linked list) and record 'module' as the owner in
// nv_minor_num_table. Only minors 0..NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN are
// eligible; higher minors are reserved for control devices.
// Returns 0 when every requested device received a minor, -1 otherwise.
// Caller must hold nv_module_table_lock.
// NOTE(review): in the all == NV_TRUE case, running out of free minors
// returns -1 without rolling back minors already assigned to earlier list
// entries — confirm callers tolerate the partial assignment.
static int add_device(nvidia_module_t *module, nv_linux_state_t *device, NvBool all)
{
    NvU32 i;
    int rc = -1;

    // Look for a free minor number and assign a unique minor to this device.
    for (i = 0; i <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN; i++)
    {
        if (nv_minor_num_table[i] == NULL)
        {
            nv_minor_num_table[i] = module;
            device->minor_num = i;
            if (all == NV_TRUE)
            {
                // Keep scanning for the next device in the list.
                device = device->next;
                if (device == NULL)
                {
                    rc = 0;
                    break;
                }
            }
            else
            {
                rc = 0;
                break;
            }
        }
    }
    return rc;
}
|
||||
|
||||
// Release 'device''s minor number back to nv_minor_num_table and reset it.
// Returns 0 on success, -1 if the device is NULL or its slot is already
// empty. Caller must hold nv_module_table_lock.
static int remove_device(nvidia_module_t *module, nv_linux_state_t *device)
{
    if (device == NULL)
    {
        return -1;
    }

    if (nv_minor_num_table[device->minor_num] == NULL)
    {
        // Slot is already free: nothing was registered under this minor.
        return -1;
    }

    nv_minor_num_table[device->minor_num] = NULL;
    device->minor_num = 0;

    return 0;
}
|
||||
|
||||
/* Export functions */
|
||||
|
||||
// Register an NVIDIA sub-module: reserve the control-device minor that
// corresponds to its instance number and bump the instance count.
// Returns 0 on success, -EINVAL for an out-of-range instance.
int nvidia_register_module(nvidia_module_t *module)
{
    int result = 0;

    down(&nv_module_table_lock);

    if (module->instance < NV_MAX_MODULE_INSTANCES)
    {
        // Control minors are allocated from the top of the minor range down.
        NvU32 ctrl_minor = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;

        nv_minor_num_table[ctrl_minor] = module;
        nv_num_instances++;
    }
    else
    {
        printk("NVRM: NVIDIA module instance %d registration failed.\n",
               module->instance);
        result = -EINVAL;
    }

    up(&nv_module_table_lock);

    return result;
}
|
||||
EXPORT_SYMBOL(nvidia_register_module);
|
||||
|
||||
// Unregister a previously registered sub-module: clear its control-device
// minor slot and decrement the instance count.
// Returns 0 on success, -1 if no module is registered for that instance.
int nvidia_unregister_module(nvidia_module_t *module)
{
    int result = 0;
    NvU32 ctrl_minor;

    down(&nv_module_table_lock);

    ctrl_minor = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;

    if (nv_minor_num_table[ctrl_minor] != NULL)
    {
        nv_minor_num_table[ctrl_minor] = NULL;
        nv_num_instances--;
    }
    else
    {
        printk("NVRM: NVIDIA module for %d instance does not exist\n",
               module->instance);
        result = -1;
    }

    up(&nv_module_table_lock);

    return result;
}
|
||||
EXPORT_SYMBOL(nvidia_unregister_module);
|
||||
|
||||
// Allocate a minor number for 'device', provided 'module' has already been
// registered (i.e. its control-device slot is populated).
// Returns 0 on success, -1 on failure.
int nvidia_frontend_add_device(nvidia_module_t *module, nv_linux_state_t * device)
{
    int result = -1;
    NvU32 ctrl_minor;

    down(&nv_module_table_lock);

    ctrl_minor = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;

    if (nv_minor_num_table[ctrl_minor] != NULL)
    {
        result = add_device(module, device, NV_FALSE);
    }
    else
    {
        printk("NVRM: NVIDIA module for %d instance does not exist\n",
               module->instance);
        result = -1;
    }

    up(&nv_module_table_lock);

    return result;
}
|
||||
EXPORT_SYMBOL(nvidia_frontend_add_device);
|
||||
|
||||
// Release 'device''s minor number, provided 'module' has already been
// registered (i.e. its control-device slot is populated).
// Returns 0 on success, -1 on failure.
int nvidia_frontend_remove_device(nvidia_module_t *module, nv_linux_state_t * device)
{
    int result;
    NvU32 ctrl_minor;

    down(&nv_module_table_lock);

    ctrl_minor = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;

    if (nv_minor_num_table[ctrl_minor] != NULL)
    {
        result = remove_device(module, device);
    }
    else
    {
        printk("NVRM: NVIDIA module for %d instance does not exist\n",
               module->instance);
        result = -1;
    }

    up(&nv_module_table_lock);

    return result;
}
|
||||
EXPORT_SYMBOL(nvidia_frontend_remove_device);
|
||||
|
||||
//
// Frontend open(): look up the sub-module owning the inode's minor number
// and forward the open to it, pinning the sub-module in memory while the
// file stays open. Returns the sub-module's result, or -ENODEV when no
// module (or no open handler) is registered for this minor.
//
int nvidia_frontend_open(
    struct inode *inode,
    struct file *file
)
{
    int rc = -ENODEV;
    nvidia_module_t *module = NULL;

    NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);

    // Hold the table lock across the lookup AND the open so the module
    // cannot be unregistered between the two.
    down(&nv_module_table_lock);
    module = nv_minor_num_table[minor_num];

    if ((module != NULL) && (module->open != NULL))
    {
        // Increment the reference count of module to ensure that module does
        // not get unloaded if its corresponding device file is open, for
        // example nvidiaN.ko should not get unloaded if /dev/nvidiaN is open.
        if (!try_module_get(module->owner))
        {
            up(&nv_module_table_lock);
            return -ENODEV;
        }
        rc = module->open(inode, file);
        if (rc < 0)
        {
            // Failed open: drop the pin taken above (close() won't run).
            module_put(module->owner);
        }
    }

    up(&nv_module_table_lock);
    return rc;
}
|
||||
|
||||
//
// Frontend release(): forward the close to the owning sub-module and drop
// the module reference taken in nvidia_frontend_open(). Returns the
// sub-module's result, or -ENODEV when no module/close handler exists.
// NOTE(review): unlike open(), the table is read here without taking
// nv_module_table_lock — presumably safe because the open-time module_get
// prevents unregistration while the file is open; confirm.
//
int nvidia_frontend_close(
    struct inode *inode,
    struct file *file
)
{
    int rc = -ENODEV;
    nvidia_module_t *module = NULL;

    NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);

    module = nv_minor_num_table[minor_num];

    if ((module != NULL) && (module->close != NULL))
    {
        rc = module->close(inode, file);

        // Decrement the reference count of module.
        module_put(module->owner);
    }

    return rc;
}
|
||||
|
||||
unsigned int nvidia_frontend_poll(
|
||||
struct file *file,
|
||||
poll_table *wait
|
||||
)
|
||||
{
|
||||
unsigned int mask = 0;
|
||||
struct inode *inode = NV_FILE_INODE(file);
|
||||
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
|
||||
nvidia_module_t *module = nv_minor_num_table[minor_num];
|
||||
|
||||
if ((module != NULL) && (module->poll != NULL))
|
||||
mask = module->poll(file, wait);
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
int nvidia_frontend_ioctl(
|
||||
struct inode *inode,
|
||||
struct file *file,
|
||||
unsigned int cmd,
|
||||
unsigned long i_arg)
|
||||
{
|
||||
int rc = -ENODEV;
|
||||
nvidia_module_t *module = NULL;
|
||||
|
||||
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
|
||||
module = nv_minor_num_table[minor_num];
|
||||
|
||||
if ((module != NULL) && (module->ioctl != NULL))
|
||||
rc = module->ioctl(inode, file, cmd, i_arg);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
// unlocked_ioctl entry point: forwards to the shared dispatcher with the
// file's inode (no BKL-era locking is needed by the dispatcher).
long nvidia_frontend_unlocked_ioctl(
    struct file *file,
    unsigned int cmd,
    unsigned long i_arg
)
{
    return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}
|
||||
|
||||
// compat_ioctl entry point for 32-bit callers on 64-bit kernels; the ioctl
// ABI is layout-compatible, so it forwards to the same shared dispatcher.
long nvidia_frontend_compat_ioctl(
    struct file *file,
    unsigned int cmd,
    unsigned long i_arg
)
{
    return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}
|
||||
|
||||
int nvidia_frontend_mmap(
|
||||
struct file *file,
|
||||
struct vm_area_struct *vma
|
||||
)
|
||||
{
|
||||
int rc = -ENODEV;
|
||||
struct inode *inode = NV_FILE_INODE(file);
|
||||
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
|
||||
nvidia_module_t *module = nv_minor_num_table[minor_num];
|
||||
|
||||
if ((module != NULL) && (module->mmap != NULL))
|
||||
rc = module->mmap(file, vma);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
//
// Module init: reset the minor-number table and its lock, initialize the
// core nvidia module, then claim the major device number so opens can be
// routed through the frontend fops. On chrdev registration failure the core
// module is unwound before returning the error.
//
static int __init nvidia_frontend_init_module(void)
{
    int status = 0;

    // Initialise the nvidia module table before anything can register.
    nv_num_instances = 0;
    memset(nv_minor_num_table, 0, sizeof(nv_minor_num_table));
    NV_INIT_MUTEX(&nv_module_table_lock);

    status = nvidia_init_module();
    if (status < 0)
    {
        return status;
    }

    // Register the character device for all NVIDIA frontend minors.
    status = register_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend", &nv_frontend_fops);
    if (status < 0)
    {
        printk("NVRM: register_chrdev() failed!\n");
        nvidia_exit_module();
    }

    return status;
}
|
||||
|
||||
//
// Module exit: release the character device when this is the last
// registered instance, then tear down the core nvidia module.
// NOTE(review): nv_num_instances is read here without nv_module_table_lock
// — presumably safe because module exit cannot race with registration at
// this point; confirm.
//
static void __exit nvidia_frontend_exit_module(void)
{
    /*
     * If this is the last nvidia_module to be unregistered, cleanup and
     * unregister char dev
     */
    if (nv_num_instances == 1)
    {
        unregister_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend");
    }

    nvidia_exit_module();
}
|
||||
|
||||
module_init(nvidia_frontend_init_module);
|
||||
module_exit(nvidia_frontend_exit_module);
|
||||
|
||||
47
kernel-open/nvidia/nv-frontend.h
Normal file
47
kernel-open/nvidia/nv-frontend.h
Normal file
@@ -0,0 +1,47 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NV_FRONTEND_H_
|
||||
#define _NV_FRONTEND_H_
|
||||
|
||||
#include "nvtypes.h"
|
||||
#include "nv-linux.h"
|
||||
#include "nv-register-module.h"
|
||||
|
||||
// Maximum number of NVIDIA sub-module instances the frontend can multiplex.
#define NV_MAX_MODULE_INSTANCES 8

// Extract the device minor number from an inode.
#define NV_FRONTEND_MINOR_NUMBER(x) minor((x)->i_rdev)

// Minor-number layout: the top NV_MAX_MODULE_INSTANCES minors
// (MIN+1 .. MAX, i.e. 248..255) are per-instance control devices; minors at
// or below MIN are handed out to individual devices by add_device().
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX 255
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN (NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - \
                                              NV_MAX_MODULE_INSTANCES)

// True when minor 'x' falls in the control-device range (MIN, MAX].
#define NV_FRONTEND_IS_CONTROL_DEVICE(x) ((x <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX) && \
                                          (x > NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN))

int nvidia_frontend_add_device(nvidia_module_t *, nv_linux_state_t *);
int nvidia_frontend_remove_device(nvidia_module_t *, nv_linux_state_t *);

// Minor number -> owning sub-module table, defined in nv-frontend.c.
extern nvidia_module_t *nv_minor_num_table[];
|
||||
|
||||
#endif
|
||||
552
kernel-open/nvidia/nv-i2c.c
Normal file
552
kernel-open/nvidia/nv-i2c.c
Normal file
@@ -0,0 +1,552 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2005-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
|
||||
#include <linux/i2c.h>
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
|
||||
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
|
||||
|
||||
//
// i2c_algorithm master_xfer hook: execute an array of basic I2C read/write
// messages through the RM I2C path. Only plain reads/writes (plus
// I2C_M_DMA_SAFE where the kernel defines it) are accepted. Returns the
// number of messages transferred on success, or a negative errno.
// NOTE(review): if a later message fails, earlier messages in the array have
// already been transferred yet an error is returned — presumably acceptable
// to i2c-core callers; confirm.
//
static int nv_i2c_algo_master_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num)
{
    nv_state_t *nv = (nv_state_t *)adapter->algo_data;
    unsigned int i = 0;
    int rc;
    NV_STATUS rmStatus = NV_OK;
    nvidia_stack_t *sp = NULL;
    // Message flags we know how to honor; everything else is rejected.
    const unsigned int supported_i2c_flags = I2C_M_RD
#if defined(I2C_M_DMA_SAFE)
        | I2C_M_DMA_SAFE
#endif
    ;

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        return rc;
    }

    // Default errno when the RM transfer itself fails.
    rc = -EIO;

    // Stop at the first failing message (rmStatus != NV_OK ends the loop).
    for (i = 0; ((i < (unsigned int)num) && (rmStatus == NV_OK)); i++)
    {
        if (msgs[i].flags & ~supported_i2c_flags)
        {
            /* we only support basic I2C reads/writes, reject any other commands */
            rc = -EINVAL;
            nv_printf(NV_DBG_ERRORS, "NVRM: Unsupported I2C flags used. (flags:0x%08x)\n",
                      msgs[i].flags);
            rmStatus = NV_ERR_INVALID_ARGUMENT;
        }
        else
        {
            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                       (msgs[i].flags & I2C_M_RD) ?
                                           NV_I2C_CMD_READ : NV_I2C_CMD_WRITE,
                                       (NvU8)(msgs[i].addr & 0x7f), 0,
                                       (NvU32)(msgs[i].len & 0xffffUL),
                                       (NvU8 *)msgs[i].buf);
        }
    }

    nv_kmem_cache_free_stack(sp);

    return (rmStatus != NV_OK) ? rc : num;
}
|
||||
|
||||
static int nv_i2c_algo_smbus_xfer(
|
||||
struct i2c_adapter *adapter,
|
||||
u16 addr,
|
||||
unsigned short flags,
|
||||
char read_write,
|
||||
u8 command,
|
||||
int size,
|
||||
union i2c_smbus_data *data
|
||||
)
|
||||
{
|
||||
nv_state_t *nv = (nv_state_t *)adapter->algo_data;
|
||||
int rc;
|
||||
NV_STATUS rmStatus = NV_OK;
|
||||
nvidia_stack_t *sp = NULL;
|
||||
|
||||
rc = nv_kmem_cache_alloc_stack(&sp);
|
||||
if (rc != 0)
|
||||
{
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = -EIO;
|
||||
|
||||
switch (size)
|
||||
{
|
||||
case I2C_SMBUS_QUICK:
|
||||
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
|
||||
(read_write == I2C_SMBUS_READ) ?
|
||||
NV_I2C_CMD_SMBUS_QUICK_READ :
|
||||
NV_I2C_CMD_SMBUS_QUICK_WRITE,
|
||||
(NvU8)(addr & 0x7f), 0, 0, NULL);
|
||||
break;
|
||||
|
||||
case I2C_SMBUS_BYTE:
|
||||
if (read_write == I2C_SMBUS_READ)
|
||||
{
|
||||
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
|
||||
NV_I2C_CMD_READ,
|
||||
(NvU8)(addr & 0x7f), 0, 1,
|
||||
(NvU8 *)&data->byte);
|
||||
}
|
||||
else
|
||||
{
|
||||
u8 data = command;
|
||||
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
|
||||
NV_I2C_CMD_WRITE,
|
||||
(NvU8)(addr & 0x7f), 0, 1,
|
||||
(NvU8 *)&data);
|
||||
}
|
||||
break;
|
||||
|
||||
case I2C_SMBUS_BYTE_DATA:
|
||||
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
|
||||
(read_write == I2C_SMBUS_READ) ?
|
||||
NV_I2C_CMD_SMBUS_READ :
|
||||
NV_I2C_CMD_SMBUS_WRITE,
|
||||
(NvU8)(addr & 0x7f), (NvU8)command, 1,
|
||||
(NvU8 *)&data->byte);
|
||||
break;
|
||||
|
||||
case I2C_SMBUS_WORD_DATA:
|
||||
if (read_write != I2C_SMBUS_READ)
|
||||
{
|
||||
data->block[1] = (data->word & 0xff);
|
||||
data->block[2] = (data->word >> 8);
|
||||
}
|
||||
|
||||
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
|
||||
(read_write == I2C_SMBUS_READ) ?
|
||||
NV_I2C_CMD_SMBUS_READ :
|
||||
NV_I2C_CMD_SMBUS_WRITE,
|
||||
(NvU8)(addr & 0x7f), (NvU8)command, 2,
|
||||
(NvU8 *)&data->block[1]);
|
||||
|
||||
if (read_write == I2C_SMBUS_READ)
|
||||
{
|
||||
data->word = ((NvU16)data->block[1]) |
|
||||
((NvU16)data->block[2] << 8);
|
||||
}
|
||||
break;
|
||||
|
||||
case I2C_SMBUS_BLOCK_DATA:
|
||||
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
|
||||
(read_write == I2C_SMBUS_READ) ?
|
||||
NV_I2C_CMD_SMBUS_BLOCK_READ :
|
||||
NV_I2C_CMD_SMBUS_BLOCK_WRITE,
|
||||
(NvU8)(addr & 0x7f), (NvU8)command,
|
||||
sizeof(data->block),
|
||||
(NvU8 *)data->block);
|
||||
break;
|
||||
default:
|
||||
rc = -EINVAL;
|
||||
rmStatus = NV_ERR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
nv_kmem_cache_free_stack(sp);
|
||||
|
||||
return (rmStatus != NV_OK) ? rc : 0;
|
||||
}
|
||||
|
||||
static u32 nv_i2c_algo_functionality(struct i2c_adapter *adapter)
|
||||
{
|
||||
nv_state_t *nv = (nv_state_t *)adapter->algo_data;
|
||||
u32 ret = I2C_FUNC_I2C;
|
||||
nvidia_stack_t *sp = NULL;
|
||||
|
||||
if (nv_kmem_cache_alloc_stack(&sp) != 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (rm_i2c_is_smbus_capable(sp, nv, adapter))
|
||||
{
|
||||
ret |= (I2C_FUNC_SMBUS_QUICK |
|
||||
I2C_FUNC_SMBUS_BYTE |
|
||||
I2C_FUNC_SMBUS_BYTE_DATA |
|
||||
I2C_FUNC_SMBUS_WORD_DATA |
|
||||
I2C_FUNC_SMBUS_BLOCK_DATA);
|
||||
}
|
||||
|
||||
nv_kmem_cache_free_stack(sp);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Algorithm vtable shared by every adapter created by nv_i2c_add_adapter(). */
static struct i2c_algorithm nv_i2c_algo = {
    .master_xfer      = nv_i2c_algo_master_xfer,
    .smbus_xfer       = nv_i2c_algo_smbus_xfer,
    .functionality    = nv_i2c_algo_functionality,
};
|
||||
|
||||
/*
 * Template adapter copied for each port in nv_i2c_add_adapter(); algo_data
 * is filled in per-instance with the owning nv_state_t.
 */
struct i2c_adapter nv_i2c_adapter_prototype = {
    .owner             = THIS_MODULE,
    .algo              = &nv_i2c_algo,
    .algo_data         = NULL,
};
|
||||
|
||||
/*
 * Allocate and register an i2c_adapter for the given GPU port.
 *
 * Returns an opaque adapter handle (to be passed back to
 * nv_i2c_del_adapter()) on success, or NULL on allocation/registration
 * failure. The adapter memory is owned by this module and freed in
 * nv_i2c_del_adapter().
 */
void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
{
    NV_STATUS rmStatus;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct i2c_adapter *pI2cAdapter = NULL;
    int osstatus = 0;

    // get a i2c adapter
    rmStatus = os_alloc_mem((void **)&pI2cAdapter,sizeof(struct i2c_adapter));

    if (rmStatus != NV_OK)
        return NULL;

    // fill in with default structure
    os_mem_copy(pI2cAdapter, &nv_i2c_adapter_prototype, sizeof(struct i2c_adapter));

    pI2cAdapter->dev.parent = nvl->dev;

    // PCI devices get a name with the bus location; SOC devices do not.
    if (nvl->pci_dev != NULL)
    {
        snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name),
                 "NVIDIA i2c adapter %u at %x:%02x.%u", port, nv->pci_info.bus,
                 nv->pci_info.slot, PCI_FUNC(nvl->pci_dev->devfn));
    }
    else
    {
        snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name),
                 "NVIDIA SOC i2c adapter %u", port);
    }

    // add our data to the structure
    pI2cAdapter->algo_data = (void *)nv;

    // attempt to register with the kernel
    osstatus = i2c_add_adapter(pI2cAdapter);

    if (osstatus)
    {
        // free the memory and NULL the ptr
        os_free_mem(pI2cAdapter);

        pI2cAdapter = NULL;
    }

    return ((void *)pI2cAdapter);
}
|
||||
|
||||
/*
 * Unregister and free an adapter previously returned by
 * nv_i2c_add_adapter(). Safe to call with data == NULL.
 */
void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
{
    struct i2c_adapter *adapter = (struct i2c_adapter *)data;

    if (adapter == NULL)
    {
        return;
    }

    /* Unregister from the I2C core, then release our allocation. */
    i2c_del_adapter(adapter);
    os_free_mem(adapter);
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#else // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
|
||||
|
||||
/* Stub: I2C support compiled out (no CONFIG_I2C); nothing to tear down. */
void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
{
}

/* Stub: I2C support compiled out (no CONFIG_I2C); no adapter is created. */
void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
{
    return NULL;
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#endif // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
|
||||
448
kernel-open/nvidia/nv-ibmnpu.c
Normal file
448
kernel-open/nvidia/nv-ibmnpu.c
Normal file
@@ -0,0 +1,448 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2017-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
* nv-ibmnpu.c - interface with the ibmnpu (IBM NVLink Processing Unit) "module"
|
||||
*/
|
||||
#include "nv-linux.h"
|
||||
|
||||
#if defined(NVCPU_PPC64LE)
|
||||
#include "nv-ibmnpu.h"
|
||||
#include "nv-rsync.h"
|
||||
|
||||
/*
|
||||
* Temporary query to get the L1D cache block size directly from the device
|
||||
* tree for the offline cache flush workaround, since the ppc64_caches symbol
|
||||
* is unavailable to us.
|
||||
*/
|
||||
const NvU32 P9_L1D_CACHE_DEFAULT_BLOCK_SIZE = 0x80;
|
||||
|
||||
#if defined(NV_OF_GET_PROPERTY_PRESENT)
|
||||
static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void)
|
||||
{
|
||||
const __be32 *block_size_prop;
|
||||
|
||||
/*
|
||||
* Attempt to look up the block size from device tree. If unavailable, just
|
||||
* return the default that we see on these systems.
|
||||
*/
|
||||
struct device_node *cpu = of_find_node_by_type(NULL, "cpu");
|
||||
if (!cpu)
|
||||
{
|
||||
return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
block_size_prop = of_get_property(cpu, "d-cache-block-size", NULL);
|
||||
if (!block_size_prop)
|
||||
{
|
||||
return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
return be32_to_cpu(*block_size_prop);
|
||||
}
|
||||
#else
|
||||
/* Fallback when of_get_property() is unavailable: use the P9 default. */
static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void)
{
    return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* GPU device memory can be exposed to the kernel as NUMA node memory via the
|
||||
* IBMNPU devices associated with the GPU. The platform firmware will specify
|
||||
* the parameters of where the memory lives in the system address space via
|
||||
* firmware properties on the IBMNPU devices. These properties specify what
|
||||
* memory can be accessed through the IBMNPU device, and the driver can online
|
||||
* a GPU device's memory into the range accessible by its associated IBMNPU
|
||||
* devices.
|
||||
*
|
||||
* This function calls over to the IBMNPU driver to query the parameters from
|
||||
* firmware, and validates that the resulting parameters are acceptable.
|
||||
*/
|
||||
/*
 * Query and validate the NUMA memory-aperture configuration reported by
 * firmware for this GPU's first attached IBMNPU device.
 *
 * On success, records the aperture addresses in nvl->npu->numa_info and the
 * NUMA node id in nvl->numa_info. On any validation failure (or when user
 * NUMA management is disabled via NVreg_EnableUserNUMAManagement), the node
 * id is left/reset to NUMA_NO_NODE so no onlining is attempted.
 */
static void nv_init_ibmnpu_numa_info(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    nv_npu_numa_info_t *npu_numa_info = &nvl->npu->numa_info;
    struct pci_dev *npu_dev = nvl->npu->devs[0];
    NvU64 spa, gpa, aper_size;

    /*
     * Terminology:
     * - system physical address (spa): 47-bit NVIDIA physical address, which
     *   is the CPU real address with the NVLink address compression scheme
     *   already applied in firmware.
     * - guest physical address (gpa): 56-bit physical address as seen by the
     *   operating system. This is the base address that we should use for
     *   onlining device memory.
     */
    nvl->numa_info.node_id = ibmnpu_device_get_memory_config(npu_dev, &spa, &gpa,
                                                             &aper_size);
    if (nvl->numa_info.node_id == NUMA_NO_NODE)
    {
        NV_DEV_PRINTF(NV_DBG_SETUP, nv, "No NUMA memory aperture found\n");
        return;
    }

    /* Validate that the compressed system physical address is not too wide */
    if (spa & (~(BIT_ULL(nv_volta_dma_addr_size) - 1)))
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
            "Invalid NUMA memory system pa 0x%llx"
            " on IBM-NPU device %04x:%02x:%02x.%u\n",
            spa, NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
            NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
        goto invalid_numa_config;
    }

    /*
     * Validate that the guest physical address is aligned to 128GB.
     * This alignment requirement comes from the Volta address space
     * size on POWER9.
     */
    if (!IS_ALIGNED(gpa, BIT_ULL(nv_volta_addr_space_width)))
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
            "Invalid alignment in NUMA memory guest pa 0x%llx"
            " on IBM-NPU device %04x:%02x:%02x.%u\n",
            gpa, NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
            NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
        goto invalid_numa_config;
    }

    /* Validate that the aperture can map all of the device's framebuffer */
    if (aper_size < nv->fb->size)
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
            "Insufficient NUMA memory aperture size 0x%llx"
            " on IBM-NPU device %04x:%02x:%02x.%u (0x%llx required)\n",
            aper_size, NV_PCI_DOMAIN_NUMBER(npu_dev),
            NV_PCI_BUS_NUMBER(npu_dev), NV_PCI_SLOT_NUMBER(npu_dev),
            PCI_FUNC(npu_dev->devfn), nv->fb->size);
        goto invalid_numa_config;
    }

    /* All firmware parameters validated; record them. */
    npu_numa_info->compr_sys_phys_addr = spa;
    npu_numa_info->guest_phys_addr = gpa;

    if (NVreg_EnableUserNUMAManagement)
    {
        /* Memory starts offline; user space drives the onlining flow. */
        NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE);
    }
    else
    {
        NV_DEV_PRINTF(NV_DBG_SETUP, nv, "User-mode NUMA onlining disabled.\n");
        nvl->numa_info.node_id = NUMA_NO_NODE;
    }

    NV_DEV_PRINTF(NV_DBG_SETUP, nv, "NUMA memory aperture: "
        "[spa = 0x%llx, gpa = 0x%llx, aper_size = 0x%llx]\n",
        spa, gpa, aper_size);

    /* Get the CPU's L1D cache block size for offlining cache flush */
    npu_numa_info->l1d_cache_block_size = nv_ibm_get_cpu_l1d_cache_block_size();

    return;

invalid_numa_config:
    NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
        "NUMA memory aperture disabled due to invalid firmware configuration\n");
    nvl->numa_info.node_id = NUMA_NO_NODE;
}
|
||||
|
||||
/*
 * Discover the IBMNPU devices attached to this GPU, allocate and populate
 * nvl->npu, query the generation-register info, and initialize the NUMA
 * aperture state. Silently does nothing if no NPU device is attached or
 * the allocation fails (nvl->npu stays NULL, which all consumers check).
 */
void nv_init_ibmnpu_info(nv_state_t *nv)
{
#if defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct pci_dev *npu_dev = pnv_pci_get_npu_dev(nvl->pci_dev, 0);
    NvU8 dev_count;

    if (!npu_dev)
    {
        return;
    }

    if (os_alloc_mem((void **)&nvl->npu, sizeof(nv_ibmnpu_info_t)) != NV_OK)
    {
        return;
    }

    os_mem_set(nvl->npu, 0, sizeof(nv_ibmnpu_info_t));

    /* Find any other IBMNPU devices attached to this GPU */
    for (nvl->npu->devs[0] = npu_dev, dev_count = 1;
         dev_count < NV_MAX_ATTACHED_IBMNPUS; dev_count++)
    {
        nvl->npu->devs[dev_count] = pnv_pci_get_npu_dev(nvl->pci_dev, dev_count);
        if (!nvl->npu->devs[dev_count])
        {
            break;
        }
    }

    nvl->npu->dev_count = dev_count;

    /*
     * If we run out of space for IBMNPU devices, NV_MAX_ATTACHED_IBMNPUS will
     * need to be bumped.
     */
    WARN_ON((dev_count == NV_MAX_ATTACHED_IBMNPUS) &&
            pnv_pci_get_npu_dev(nvl->pci_dev, dev_count));

    /* Generation registers are only queried on the first NPU device. */
    ibmnpu_device_get_genregs_info(npu_dev, &nvl->npu->genregs);

    if (nvl->npu->genregs.size > 0)
    {
        NV_DEV_PRINTF(NV_DBG_SETUP, nv,
            "IBM-NPU device %04x:%02x:%02x.%u associated with GPU "
            " has a generation register space 0x%llx-0x%llx\n",
            NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
            NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn),
            nvl->npu->genregs.start_addr,
            nvl->npu->genregs.start_addr + nvl->npu->genregs.size - 1);
    }
    else
    {
        NV_DEV_PRINTF(NV_DBG_SETUP, nv,
            "IBM-NPU device %04x:%02x:%02x.%u associated with GPU "
            "does not support generation registers\n",
            NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
            NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
    }

    nv_init_ibmnpu_numa_info(nv);
#endif
}
|
||||
|
||||
/* Release the per-GPU IBMNPU bookkeeping allocated by nv_init_ibmnpu_info(). */
void nv_destroy_ibmnpu_info(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (nvl->npu == NULL)
    {
        return;
    }

    os_free_mem(nvl->npu);
    nvl->npu = NULL;
}
|
||||
|
||||
/*
 * Initialize every discovered IBMNPU device for this GPU.
 *
 * Returns 0 on success (or when no NPU state exists); on the first failure,
 * unwinds the devices initialized so far and returns -EIO.
 */
int nv_init_ibmnpu_devices(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvU8 idx;

    if (nvl->npu == NULL)
    {
        return 0;
    }

    for (idx = 0; idx < nvl->npu->dev_count; idx++)
    {
        struct pci_dev *dev = nvl->npu->devs[idx];

        NV_DEV_PRINTF(NV_DBG_SETUP, nv,
            "Initializing IBM-NPU device %04x:%02x:%02x.%u\n",
            NV_PCI_DOMAIN_NUMBER(dev),
            NV_PCI_BUS_NUMBER(dev),
            NV_PCI_SLOT_NUMBER(dev),
            PCI_FUNC(dev->devfn));

        if (ibmnpu_init_device(dev) != NVL_SUCCESS)
        {
            /* Roll back whatever was initialized before this failure. */
            nv_unregister_ibmnpu_devices(nv);
            return -EIO;
        }

        nvl->npu->initialized_dev_count++;
    }

    return 0;
}
|
||||
|
||||
/*
 * Unregister every IBMNPU device that was successfully initialized, then
 * reset the initialized count. No-op when no NPU state exists.
 */
void nv_unregister_ibmnpu_devices(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvU8 idx;

    if (nvl->npu == NULL)
    {
        return;
    }

    for (idx = 0; idx < nvl->npu->initialized_dev_count; idx++)
    {
        struct pci_dev *dev = nvl->npu->devs[idx];

        NV_DEV_PRINTF(NV_DBG_SETUP, nv,
            "Unregistering IBM-NPU device %04x:%02x:%02x.%u\n",
            NV_PCI_DOMAIN_NUMBER(dev),
            NV_PCI_BUS_NUMBER(dev),
            NV_PCI_SLOT_NUMBER(dev),
            PCI_FUNC(dev->devfn));

        ibmnpu_unregister_device(dev);
    }

    nvl->npu->initialized_dev_count = 0;
}
|
||||
|
||||
/*
 * Report the generation-register window of the first attached IBMNPU device.
 * Each out-parameter is optional (may be NULL). Returns NV_ERR_NOT_SUPPORTED
 * when no NPU is attached or it exposes no generation registers.
 */
NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *nv, NvU64 *addr,
                                                NvU64 *size, void **device)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if ((nvl->npu == NULL) || (nvl->npu->genregs.size == 0))
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    if (addr != NULL)
    {
        *addr = nvl->npu->genregs.start_addr;
    }
    if (size != NULL)
    {
        *size = nvl->npu->genregs.size;
    }
    if (device != NULL)
    {
        *device = (void *)nvl->npu->devs[0];
    }

    return NV_OK;
}
|
||||
|
||||
/*
 * Query the rsync relaxed-ordering mode for this GPU. Only supported when an
 * IBMNPU with generation registers is attached; otherwise returns
 * NV_ERR_NOT_SUPPORTED without touching *mode.
 */
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv,
                                                          NvBool *mode)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if ((nvl->npu == NULL) || (nvl->npu->genregs.size == 0))
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    *mode = nv_get_rsync_relaxed_ordering_mode(nv);
    return NV_OK;
}
|
||||
|
||||
/*
 * Block until the rsync operation completes. No-op unless an IBMNPU with
 * generation registers is attached to this GPU.
 */
void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if ((nvl->npu != NULL) && (nvl->npu->genregs.size != 0))
    {
        nv_wait_for_rsync(nv);
    }
}
|
||||
|
||||
/*
 * Return the chip id of the first attached IBMNPU device, or -1 when no NPU
 * state exists for this GPU.
 */
int nv_get_ibmnpu_chip_id(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    return (nvl->npu == NULL) ? -1
                              : ibmnpu_device_get_chip_id(nvl->npu->devs[0]);
}
|
||||
|
||||
/*
 * Flush and evict a CPU-virtual address range from the CPU caches, one L1D
 * cache block at a time, using the PPC "dcbf" (data cache block flush)
 * instruction. Used after NUMA offlining so no stale lines reference the
 * device memory.
 *
 * NOTE(review): assumes nvl->npu is non-NULL and numa_info was initialized
 * (l1d_cache_block_size != 0) — callers presumably guarantee this; a zero
 * block size would make the loop below spin forever. Confirm at call sites.
 */
void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvU64 offset, cbsize;

    /*
     * The range is commonly an ioremap()ed mapping of the GPU's ATS range and
     * needs to be compared against the created mappings. Alternatively, kernel
     * page tables can be dumped through sysfs if CONFIG_PPC_PTDUMP is enabled.
     */
    NV_DEV_PRINTF(NV_DBG_INFO, nv,
        "Flushing CPU virtual range [0x%llx, 0x%llx)\n",
        cpu_virtual, cpu_virtual + size);

    cbsize = nvl->npu->numa_info.l1d_cache_block_size;

    CACHE_FLUSH();

    /* Force eviction of any cache lines from the NUMA-onlined region. */
    for (offset = 0; offset < size; offset += cbsize)
    {
        /* dcbf flushes the cache block containing cpu_virtual + offset. */
        asm volatile("dcbf %0,%1" :: "r" (cpu_virtual), "r" (offset) : "memory");

        /* Reschedule if necessary to avoid lockup warnings */
        cond_resched();
    }

    CACHE_FLUSH();
}
|
||||
|
||||
#else
|
||||
|
||||
/* Non-PPC64LE stubs: IBMNPU support does not exist on these platforms. */

void nv_init_ibmnpu_info(nv_state_t *nv)
{
}

void nv_destroy_ibmnpu_info(nv_state_t *nv)
{
}

int nv_init_ibmnpu_devices(nv_state_t *nv)
{
    return 0;
}

void nv_unregister_ibmnpu_devices(nv_state_t *nv)
{
}

NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *nv, NvU64 *addr,
                                                NvU64 *size, void **device)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv,
                                                          NvBool *mode)
{
    return NV_ERR_NOT_SUPPORTED;
}

void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv)
{
}

int nv_get_ibmnpu_chip_id(nv_state_t *nv)
{
    return -1;
}

void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 virtual, NvU64 size)
{
}

void nv_ibmnpu_cache_flush_numa_region(nv_state_t *nv)
{
}

#endif
|
||||
80
kernel-open/nvidia/nv-ibmnpu.h
Normal file
80
kernel-open/nvidia/nv-ibmnpu.h
Normal file
@@ -0,0 +1,80 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NV_IBMNPU_H_
|
||||
#define _NV_IBMNPU_H_
|
||||
|
||||
#if defined(NVCPU_PPC64LE)
|
||||
|
||||
#include "ibmnpu_linux.h"
|
||||
|
||||
#define NV_MAX_ATTACHED_IBMNPUS 6
|
||||
|
||||
/* NUMA aperture parameters reported by firmware for a GPU's IBMNPU devices. */
typedef struct nv_npu_numa_info
{
    /*
     * 47-bit NVIDIA 'system physical address': the hypervisor real 56-bit
     * address with NVLink address compression scheme applied.
     */
    NvU64 compr_sys_phys_addr;

    /*
     * 56-bit NVIDIA 'guest physical address'/host virtual address. On
     * unvirtualized systems, applying the NVLink address compression scheme
     * to this address should be the same as compr_sys_phys_addr.
     */
    NvU64 guest_phys_addr;

    /*
     * L1 data cache block size on P9 - needed to manually flush/invalidate the
     * NUMA region from the CPU caches after offlining.
     */
    NvU32 l1d_cache_block_size;
} nv_npu_numa_info_t;
|
||||
|
||||
/* Per-GPU IBMNPU state, allocated by nv_init_ibmnpu_info(). */
struct nv_ibmnpu_info
{
    NvU8 dev_count;               /* number of discovered NPU devices */
    NvU8 initialized_dev_count;   /* how many have passed ibmnpu_init_device() */
    struct pci_dev *devs[NV_MAX_ATTACHED_IBMNPUS];
    ibmnpu_genregs_info_t genregs;    /* generation-register window of devs[0] */
    nv_npu_numa_info_t numa_info;     /* firmware-reported NUMA aperture */
};
|
||||
|
||||
/*
 * TODO: These parameters are specific to Volta/P9 configurations, and may
 * need to be determined dynamically in the future.
 */
static const NvU32 nv_volta_addr_space_width = 37;  /* 128GB aperture alignment */
static const NvU32 nv_volta_dma_addr_size = 47;     /* compressed spa width */
|
||||
|
||||
#endif
|
||||
|
||||
void nv_init_ibmnpu_info(nv_state_t *nv);
|
||||
void nv_destroy_ibmnpu_info(nv_state_t *nv);
|
||||
int nv_init_ibmnpu_devices(nv_state_t *nv);
|
||||
void nv_unregister_ibmnpu_devices(nv_state_t *nv);
|
||||
int nv_get_ibmnpu_chip_id(nv_state_t *nv);
|
||||
void nv_ibmnpu_cache_flush_numa_region(nv_state_t *nv);
|
||||
|
||||
#endif
|
||||
335
kernel-open/nvidia/nv-kthread-q.c
Normal file
335
kernel-open/nvidia/nv-kthread-q.c
Normal file
@@ -0,0 +1,335 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nv-kthread-q.h"
|
||||
#include "nv-list-helpers.h"
|
||||
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#if defined(NV_LINUX_BUG_H_PRESENT)
|
||||
#include <linux/bug.h>
|
||||
#else
|
||||
#include <asm/bug.h>
|
||||
#endif
|
||||
|
||||
// Today's implementation is a little simpler and more limited than the
|
||||
// API description allows for in nv-kthread-q.h. Details include:
|
||||
//
|
||||
// 1. Each nv_kthread_q instance is a first-in, first-out queue.
|
||||
//
|
||||
// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
|
||||
//
|
||||
// You can create any number of queues, each of which gets its own
|
||||
// named kernel thread (kthread). You can then insert arbitrary functions
|
||||
// into the queue, and those functions will be run in the context of the
|
||||
// queue's kthread.
|
||||
|
||||
#ifndef WARN
// Only *really* old kernels (2.6.9) end up here. Just use a simple printk
// to implement this, because such kernels won't be supported much longer.
// Mirrors the kernel's WARN(): evaluates to the (boolean) condition so it
// can be used inside an if ().
#define WARN(condition, format...) ({                   \
    int __ret_warn_on = !!(condition);                  \
    if (unlikely(__ret_warn_on))                        \
        printk(KERN_ERR format);                        \
    unlikely(__ret_warn_on);                            \
})
#endif
|
||||
|
||||
// Warn with context: tags the message with the current task name, or with
// "[in interrupt]" when no task context is available.
#define NVQ_WARN(fmt, ...)                              \
    do {                                                \
        if (in_interrupt()) {                           \
            WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \
                 ##__VA_ARGS__);                        \
        }                                               \
        else {                                          \
            WARN(1, "nv_kthread_q: task: %s: " fmt,     \
                 current->comm,                         \
                 ##__VA_ARGS__);                        \
        }                                               \
    } while (0)
|
||||
|
||||
// Worker kthread body for an nv_kthread_q: waits on q_sem, then consumes
// and runs queued items in FIFO order, until main_loop_should_exit is set.
static int _main_loop(void *args)
{
    nv_kthread_q_t *q = (nv_kthread_q_t *)args;
    nv_kthread_q_item_t *q_item = NULL;
    unsigned long flags;

    while (1) {
        // Normally this thread is never interrupted. However,
        // down_interruptible (instead of down) is called here,
        // in order to avoid being classified as a potentially
        // hung task, by the kernel watchdog.
        while (down_interruptible(&q->q_sem))
            NVQ_WARN("Interrupted during semaphore wait\n");

        if (atomic_read(&q->main_loop_should_exit))
            break;

        spin_lock_irqsave(&q->q_lock, flags);

        // The q_sem semaphore prevents us from getting here unless there is
        // at least one item in the list, so an empty list indicates a bug.
        if (unlikely(list_empty(&q->q_list_head))) {
            spin_unlock_irqrestore(&q->q_lock, flags);
            NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q);
            continue;
        }

        // Consume one item from the queue
        q_item = list_first_entry(&q->q_list_head,
                                  nv_kthread_q_item_t,
                                  q_list_node);

        list_del_init(&q_item->q_list_node);

        spin_unlock_irqrestore(&q->q_lock, flags);

        // Run the item outside the lock; it may requeue itself.
        q_item->function_to_run(q_item->function_args);

        // Make debugging a little simpler by clearing this between runs:
        q_item = NULL;
    }

    // Stay schedulable (without doing work) until kthread_stop() is called,
    // so the stopping thread can safely observe our exit.
    while (!kthread_should_stop())
        schedule();

    return 0;
}
|
||||
|
||||
// Flush all pending items, then stop the queue's worker kthread.
// Safe to call on an uninitialized queue (no-op) and idempotent: a second
// call finds main_loop_should_exit already set and does nothing further.
void nv_kthread_q_stop(nv_kthread_q_t *q)
{
    // check if queue has been properly initialized
    if (unlikely(!q->q_kthread))
        return;

    nv_kthread_q_flush(q);

    // If this assertion fires, then a caller likely either broke the API rules,
    // by adding items after calling nv_kthread_q_stop, or possibly messed up
    // with inadequate flushing of self-rescheduling q_items.
    if (unlikely(!list_empty(&q->q_list_head)))
        NVQ_WARN("list not empty after flushing\n");

    if (likely(!atomic_read(&q->main_loop_should_exit))) {

        atomic_set(&q->main_loop_should_exit, 1);

        // Wake up the kthread so that it can see that it needs to stop:
        up(&q->q_sem);

        kthread_stop(q->q_kthread);
        q->q_kthread = NULL;
    }
}
|
||||
|
||||
// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by
|
||||
// kthread_create_on_node relies on a 2 entry, per-core cache to minimize
|
||||
// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the
|
||||
// stack location ends up being a function of the core assigned to the current
|
||||
// thread, instead of being a function of the specified NUMA node. The cache was
|
||||
// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0
|
||||
// ("fork: Optimize task creation by caching two thread stacks per CPU if
|
||||
// CONFIG_VMAP_STACK=y")
|
||||
//
|
||||
// To work around the problematic cache, we create up to three kernel threads
|
||||
// -If the first thread's stack is resident on the preferred node, return this
|
||||
// thread.
|
||||
// -Otherwise, create a second thread. If its stack is resident on the
|
||||
// preferred node, stop the first thread and return this one.
|
||||
// -Otherwise, create a third thread. The stack allocator does not find a
|
||||
// cached stack, and so falls back to vmalloc, which takes the NUMA hint into
|
||||
// consideration. The first two threads are then stopped.
|
||||
//
|
||||
// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned.
|
||||
//
|
||||
// This function is never invoked when there is no NUMA preference (preferred
|
||||
// node is NUMA_NO_NODE).
|
||||
#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1
// Create a kthread whose stack is resident on 'preferred_node', working around
// the NUMA-unaware per-CPU stack cache used when CONFIG_VMAP_STACK is set (see
// the comment block above). Makes up to 'attempts' creations, keeping the
// first thread whose stack lands on the preferred node and stopping the rest.
// On creation failure, falls back to the previously created thread (if any),
// so the return value may still be an ERR_PTR only when the very first
// kthread_create_on_node() fails — callers check with IS_ERR().
static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
                                                 nv_kthread_q_t *q,
                                                 int preferred_node,
                                                 const char *q_name)
{

    unsigned i, j;
    const static unsigned attempts = 3;
    struct task_struct *thread[3];

    for (i = 0;; i++) {
        struct page *stack;

        thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name);

        if (unlikely(IS_ERR(thread[i]))) {

            // Instead of failing, pick the previous thread, even if its
            // stack is not allocated on the preferred node.
            if (i > 0)
                i--;

            break;
        }

        // vmalloc is not used to allocate the stack, so simply return the
        // thread, even if its stack may not be allocated on the preferred node
        if (!is_vmalloc_addr(thread[i]->stack))
            break;

        // Ran out of attempts - return thread even if its stack may not be
        // allocated on the preferred node
        if ((i == (attempts - 1)))
            break;

        // Get the NUMA node where the first page of the stack is resident. If
        // it is the preferred node, select this thread.
        stack = vmalloc_to_page(thread[i]->stack);
        if (page_to_nid(stack) == preferred_node)
            break;
    }

    // Stop every earlier candidate thread; thread[i] is the keeper.
    for (j = i; j > 0; j--)
        kthread_stop(thread[j - 1]);

    return thread[i];
}
#endif
|
||||
|
||||
// Initialize a queue and create its worker kthread, optionally preferring a
// NUMA node for the thread's stack. Returns 0 on success, a negative errno if
// kthread creation fails, or -ENOTSUPP when a NUMA preference is requested
// but NV_KTHREAD_Q_SUPPORTS_AFFINITY() is 0 on this kernel.
int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node)
{
    memset(q, 0, sizeof(*q));

    INIT_LIST_HEAD(&q->q_list_head);
    spin_lock_init(&q->q_lock);
    sema_init(&q->q_sem, 0);

    if (preferred_node == NV_KTHREAD_NO_NODE) {
        q->q_kthread = kthread_create(_main_loop, q, q_name);
    }
    else {
#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1
        q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name);
#else
        return -ENOTSUPP;
#endif
    }

    if (IS_ERR(q->q_kthread)) {
        int err = PTR_ERR(q->q_kthread);

        // Clear q_kthread before returning so that nv_kthread_q_stop() can be
        // safely called on it making error handling easier.
        q->q_kthread = NULL;

        return err;
    }

    // kthread_create() leaves the thread stopped; start the main loop now.
    wake_up_process(q->q_kthread);

    return 0;
}
|
||||
|
||||
// Returns true (non-zero) if the item was actually scheduled, and false if the
|
||||
// item was already pending in a queue.
|
||||
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
|
||||
{
|
||||
unsigned long flags;
|
||||
int ret = 1;
|
||||
|
||||
spin_lock_irqsave(&q->q_lock, flags);
|
||||
|
||||
if (likely(list_empty(&q_item->q_list_node)))
|
||||
list_add_tail(&q_item->q_list_node, &q->q_list_head);
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
spin_unlock_irqrestore(&q->q_lock, flags);
|
||||
|
||||
if (likely(ret))
|
||||
up(&q->q_sem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
|
||||
nv_q_func_t function_to_run,
|
||||
void *function_args)
|
||||
{
|
||||
INIT_LIST_HEAD(&q_item->q_list_node);
|
||||
q_item->function_to_run = function_to_run;
|
||||
q_item->function_args = function_args;
|
||||
}
|
||||
|
||||
// Returns true (non-zero) if the q_item got scheduled, false otherwise.
|
||||
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
|
||||
nv_kthread_q_item_t *q_item)
|
||||
{
|
||||
if (unlikely(atomic_read(&q->main_loop_should_exit))) {
|
||||
NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was "
|
||||
"called with a non-alive q: 0x%p\n", q);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return _raw_q_schedule(q, q_item);
|
||||
}
|
||||
|
||||
// Queue callback used by _raw_q_flush(): wakes the waiting flusher.
static void _q_flush_function(void *args)
{
    complete((struct completion *)args);
}
|
||||
|
||||
|
||||
// Drain the queue once: schedule a sentinel item and wait for it to run.
static void _raw_q_flush(nv_kthread_q_t *q)
{
    DECLARE_COMPLETION(flush_done);
    nv_kthread_q_item_t flush_item;

    nv_kthread_q_item_init(&flush_item, _q_flush_function, &flush_done);
    _raw_q_schedule(q, &flush_item);

    // Everything queued ahead of the sentinel has executed by the time the
    // sentinel completes, so this wait finishes the flush.
    wait_for_completion(&flush_done);
}
|
||||
|
||||
// Wait until every item currently in the queue (including one level of
// self-rescheduling) has run. Must not be called after nv_kthread_q_stop().
void nv_kthread_q_flush(nv_kthread_q_t *q)
{
    int pass;

    if (unlikely(atomic_read(&q->main_loop_should_exit))) {
        NVQ_WARN("Not allowed: nv_kthread_q_flush was called after "
                 "nv_kthread_q_stop. q: 0x%p\n", q);
        return;
    }

    // Flushing twice is deliberate: a q_item that reschedules itself during
    // the first pass gets drained by the second pass.
    for (pass = 0; pass < 2; pass++)
        _raw_q_flush(q);
}
|
||||
232
kernel-open/nvidia/nv-memdbg.c
Normal file
232
kernel-open/nvidia/nv-memdbg.c
Normal file
@@ -0,0 +1,232 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nv-memdbg.h"
|
||||
#include "nv-linux.h"
|
||||
|
||||
/* track who's allocating memory and print out a list of leaked allocations at
|
||||
* teardown.
|
||||
*/
|
||||
|
||||
// One tracked allocation, keyed by address in g_nv_memdbg.rb_root.
typedef struct {
    struct rb_node rb_node;  // linkage in the rb-tree, ordered by addr
    void *addr;              // base address handed back to the allocator's caller
    NvU64 size;              // allocation size in bytes
    NvU32 line;              // source line of the allocation site
    const char *file;        // source file of the allocation site (may be NULL)
} nv_memdbg_node_t;
|
||||
|
||||
// Global allocation-tracking state; all fields are protected by 'lock'.
struct
{
    struct rb_root rb_root;      // tree of nv_memdbg_node_t, keyed by addr
    NvU64 untracked_bytes;       // bytes whose tracking node could not be allocated
    NvU64 num_untracked_allocs;  // count of such untracked allocations
    nv_spinlock_t lock;
} g_nv_memdbg;
|
||||
|
||||
void nv_memdbg_init(void)
|
||||
{
|
||||
NV_SPIN_LOCK_INIT(&g_nv_memdbg.lock);
|
||||
g_nv_memdbg.rb_root = RB_ROOT;
|
||||
}
|
||||
|
||||
// Convert an rb-tree linkage pointer back to its containing tracking node.
static nv_memdbg_node_t *nv_memdbg_node_entry(struct rb_node *rb_node)
{
    return rb_entry(rb_node, nv_memdbg_node_t, rb_node);
}
|
||||
|
||||
// Insert a tracking node into the address-ordered rb-tree. Duplicate
// addresses indicate double-tracking and trigger a WARN. Caller must hold
// g_nv_memdbg.lock.
static void nv_memdbg_insert_node(nv_memdbg_node_t *new)
{
    struct rb_node **link = &g_nv_memdbg.rb_root.rb_node;
    struct rb_node *parent = NULL;

    while (*link)
    {
        nv_memdbg_node_t *cur = nv_memdbg_node_entry(*link);

        WARN_ON(new->addr == cur->addr);

        parent = *link;
        link = (new->addr < cur->addr) ? &(*link)->rb_left
                                       : &(*link)->rb_right;
    }

    rb_link_node(&new->rb_node, parent, link);
    rb_insert_color(&new->rb_node, &g_nv_memdbg.rb_root);
}
|
||||
|
||||
static nv_memdbg_node_t *nv_memdbg_remove_node(void *addr)
|
||||
{
|
||||
nv_memdbg_node_t *node = NULL;
|
||||
struct rb_node *rb_node = g_nv_memdbg.rb_root.rb_node;
|
||||
|
||||
while (rb_node)
|
||||
{
|
||||
node = nv_memdbg_node_entry(rb_node);
|
||||
if (addr == node->addr)
|
||||
break;
|
||||
else if (addr < node->addr)
|
||||
rb_node = rb_node->rb_left;
|
||||
else
|
||||
rb_node = rb_node->rb_right;
|
||||
}
|
||||
|
||||
WARN_ON(!node || node->addr != addr);
|
||||
|
||||
rb_erase(&node->rb_node, &g_nv_memdbg.rb_root);
|
||||
return node;
|
||||
}
|
||||
|
||||
// Record a new allocation of 'size' bytes at 'addr' made at file:line. If the
// tracking node itself cannot be allocated, the allocation is still counted
// in the untracked totals so the leak report at teardown stays consistent.
void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line)
{
    nv_memdbg_node_t *node;
    unsigned long flags;

    WARN_ON(addr == NULL);

    /* If node allocation fails, we can still update the untracked counters */
    // Allocate outside the spinlock; GFP flags depend on sleepable context.
    node = kmalloc(sizeof(*node),
                   NV_MAY_SLEEP() ? NV_GFP_KERNEL : NV_GFP_ATOMIC);
    if (node)
    {
        node->addr = addr;
        node->size = size;
        node->file = file;
        node->line = line;
    }

    NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags);

    if (node)
    {
        nv_memdbg_insert_node(node);
    }
    else
    {
        ++g_nv_memdbg.num_untracked_allocs;
        g_nv_memdbg.untracked_bytes += size;
    }

    NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags);
}
|
||||
|
||||
// Forget a tracked allocation at 'addr'. If the allocation was never tracked
// (its node allocation failed in nv_memdbg_add), the untracked counters are
// decremented instead. A size mismatch between add and remove is reported and
// traps into the debugger. The file/line parameters of the free site are
// currently unused here.
void nv_memdbg_remove(void *addr, NvU64 size, const char *file, int line)
{
    nv_memdbg_node_t *node;
    unsigned long flags;

    NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags);

    node = nv_memdbg_remove_node(addr);
    if (!node)
    {
        // Untracked path: these WARNs fire if removes outnumber adds.
        WARN_ON(g_nv_memdbg.num_untracked_allocs == 0);
        WARN_ON(g_nv_memdbg.untracked_bytes < size);
        --g_nv_memdbg.num_untracked_allocs;
        g_nv_memdbg.untracked_bytes -= size;
    }

    NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags);

    // Report and free outside the spinlock.
    if (node)
    {
        // size == 0 means "caller does not know the size"; skip the check.
        if ((size != 0) && (node->size != size))
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: size mismatch on free: %llu != %llu\n",
                size, node->size);
            if (node->file)
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: allocation: 0x%p @ %s:%d\n",
                    node->addr, node->file, node->line);
            }
            else
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: allocation: 0x%p\n",
                    node->addr);
            }
            os_dbg_breakpoint();
        }

        kfree(node);
    }
}
|
||||
|
||||
// Teardown report: print every allocation still in the tree as a leak, free
// the tracking nodes, and print totals (including untracked allocations whose
// nodes could not be allocated). Runs single-threaded at module unload, so no
// lock is taken here.
void nv_memdbg_exit(void)
{
    nv_memdbg_node_t *node;
    NvU64 leaked_bytes = 0, num_leaked_allocs = 0;

    if (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root))
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: list of leaked memory allocations:\n");
    }

    // Repeatedly take the leftmost node until the tree is empty.
    while (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root))
    {
        node = nv_memdbg_node_entry(rb_first(&g_nv_memdbg.rb_root));

        leaked_bytes += node->size;
        ++num_leaked_allocs;

        if (node->file)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %llu bytes, 0x%p @ %s:%d\n",
                node->size, node->addr, node->file, node->line);
        }
        else
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %llu bytes, 0x%p\n",
                node->size, node->addr);
        }

        rb_erase(&node->rb_node, &g_nv_memdbg.rb_root);
        kfree(node);
    }

    /* If we failed to allocate a node at some point, we may have leaked memory
     * even if the tree is empty */
    if (num_leaked_allocs > 0 || g_nv_memdbg.num_untracked_allocs > 0)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: total leaked memory: %llu bytes in %llu allocations\n",
            leaked_bytes + g_nv_memdbg.untracked_bytes,
            num_leaked_allocs + g_nv_memdbg.num_untracked_allocs);

        if (g_nv_memdbg.num_untracked_allocs > 0)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %llu bytes in %llu allocations untracked\n",
                g_nv_memdbg.untracked_bytes, g_nv_memdbg.num_untracked_allocs);
        }
    }
}
|
||||
780
kernel-open/nvidia/nv-mmap.c
Normal file
780
kernel-open/nvidia/nv-mmap.c
Normal file
@@ -0,0 +1,780 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
#include "nv_speculation_barrier.h"
|
||||
|
||||
/*
|
||||
* The 'struct vm_operations' open() callback is called by the Linux
|
||||
* kernel when the parent VMA is split or copied, close() when the
|
||||
* current VMA is about to be deleted.
|
||||
*
|
||||
* We implement these callbacks to keep track of the number of user
|
||||
* mappings of system memory allocations. This was motivated by a
|
||||
* subtle interaction problem between the driver and the kernel with
|
||||
* respect to the bookkeeping of pages marked reserved and later
|
||||
* mapped with mmap().
|
||||
*
|
||||
* Traditionally, the Linux kernel ignored reserved pages, such that
|
||||
* when they were mapped via mmap(), the integrity of their usage
|
||||
* counts depended on the reserved bit being set for as long as user
|
||||
* mappings existed.
|
||||
*
|
||||
* Since we mark system memory pages allocated for DMA reserved and
|
||||
* typically map them with mmap(), we need to ensure they remain
|
||||
* reserved until the last mapping has been torn down. This worked
|
||||
* correctly in most cases, but in a few, the RM API called into the
|
||||
* RM to free memory before calling munmap() to unmap it.
|
||||
*
|
||||
* In the past, we allowed nv_free_pages() to remove the 'at' from
|
||||
* the parent device's allocation list in this case, but didn't
|
||||
* release the underlying pages until the last user mapping had been
|
||||
* destroyed:
|
||||
*
|
||||
* In nvidia_vma_release(), we freed any resources associated with
|
||||
* the allocation (IOMMU mappings, etc.) and cleared the
|
||||
* underlying pages' reserved bits, but didn't free them. The kernel
|
||||
* was expected to do this.
|
||||
*
|
||||
 * This worked in practice, but made dangerous assumptions about the
|
||||
* kernel's behavior and could fail in some cases. We now handle
|
||||
* this case differently (see below).
|
||||
*/
|
||||
static void
|
||||
nvidia_vma_open(struct vm_area_struct *vma)
|
||||
{
|
||||
nv_alloc_t *at = NV_VMA_PRIVATE(vma);
|
||||
|
||||
NV_PRINT_VMA(NV_DBG_MEMINFO, vma);
|
||||
|
||||
if (at != NULL)
|
||||
{
|
||||
NV_ATOMIC_INC(at->usage_count);
|
||||
|
||||
NV_PRINT_AT(NV_DBG_MEMINFO, at);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* (see above for additional information)
|
||||
*
|
||||
* If the 'at' usage count drops to zero with the updated logic, the
|
||||
* the allocation is recorded in the free list of the private
|
||||
* data associated with the file pointer; nvidia_close() uses this
|
||||
* list to perform deferred free operations when the parent file
|
||||
* descriptor is closed. This will typically happen when the process
|
||||
* exits.
|
||||
*
|
||||
* Since this is technically a workaround to handle possible fallout
|
||||
* from misbehaving clients, we additionally print a warning.
|
||||
*/
|
||||
// VMA close() callback: drops the mapping's reference on the backing
// allocation via nv_alloc_release(). When that call reports the last
// reference went away here (a "late unmap" — the client freed the memory
// before unmapping it), a rate-limited warning is printed. See the comment
// block above for background.
static void
nvidia_vma_release(struct vm_area_struct *vma)
{
    nv_alloc_t *at = NV_VMA_PRIVATE(vma);
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
    // Process-wide counter capping how many late-unmap warnings are emitted.
    static int count = 0;

    NV_PRINT_VMA(NV_DBG_MEMINFO, vma);

    if (at != NULL && nv_alloc_release(nvlfp, at))
    {
        // Only warn for unmaps performed by the allocating process itself.
        if ((at->pid == os_get_current_process()) &&
            (count++ < NV_MAX_RECURRING_WARNING_MESSAGES))
        {
            nv_printf(NV_DBG_MEMINFO,
                "NVRM: VM: %s: late unmap, comm: %s, 0x%p\n",
                __FUNCTION__, current->comm, at);
        }
    }
}
|
||||
|
||||
// VMA access() callback (used by e.g. ptrace/gdb to peek or poke a mapping
// that cannot be read through normal page-table walks). Accesses at most one
// page per call and returns the number of bytes copied, or a negative errno.
// Control-node mappings are serviced from the allocation's kernel virtual
// addresses; device FB mappings get a temporary one-page kernel mapping.
static int
nvidia_vma_access(
    struct vm_area_struct *vma,
    unsigned long addr,
    void *buffer,
    int length,
    int write
)
{
    nv_alloc_t *at = NULL;
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
    nv_state_t *nv = NV_STATE_PTR(nvlfp->nvptr);
    NvU32 pageIndex, pageOffset;
    void *kernel_mapping;
    const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context;
    NvU64 offset;

    // Locate the faulting byte within the VMA: page number and in-page offset.
    pageIndex = ((addr - vma->vm_start) >> PAGE_SHIFT);
    pageOffset = (addr & ~PAGE_MASK);

    if (!mmap_context->valid)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap context\n");
        return -EINVAL;
    }

    offset = mmap_context->mmap_start;

    if (nv->flags & NV_FLAG_CONTROL)
    {
        at = NV_VMA_PRIVATE(vma);

        /*
         * at can be NULL for peer IO mem.
         */
        if (!at)
            return -EINVAL;

        if (pageIndex >= at->num_pages)
            return -EINVAL;

        /*
         * For PPC64LE build, nv_array_index_no_speculate() is not defined
         * therefore call nv_speculation_barrier().
         * When this definition is added, this platform check should be removed.
         */
#if !defined(NVCPU_PPC64LE)
        pageIndex = nv_array_index_no_speculate(pageIndex, at->num_pages);
#else
        nv_speculation_barrier();
#endif
        kernel_mapping = (void *)(at->page_table[pageIndex]->virt_addr + pageOffset);
    }
    else if (IS_FB_OFFSET(nv, offset, length))
    {
        // Device node: map the containing FB page into kernel space for the
        // duration of this access; torn down below (at == NULL marks this).
        addr = (offset & PAGE_MASK);
        kernel_mapping = os_map_kernel_space(addr, PAGE_SIZE, NV_MEMORY_UNCACHED);
        if (kernel_mapping == NULL)
            return -ENOMEM;

        kernel_mapping = ((char *)kernel_mapping + pageOffset);
    }
    else
        return -EINVAL;

    // Clamp the access so it never crosses the page boundary.
    length = NV_MIN(length, (int)(PAGE_SIZE - pageOffset));

    if (write)
        memcpy(kernel_mapping, buffer, length);
    else
        memcpy(buffer, kernel_mapping, length);

    // Only the FB path (at == NULL) created a temporary kernel mapping.
    if (at == NULL)
    {
        kernel_mapping = ((char *)kernel_mapping - pageOffset);
        os_unmap_kernel_space(kernel_mapping, PAGE_SIZE);
    }

    return length;
}
|
||||
|
||||
// VMA fault() callback: reinstates revoked GPU mappings on first touch. If
// the GPU is not in a mappable state, schedules an asynchronous wakeup and
// returns VM_FAULT_NOPAGE so the access retries later. On success, inserts
// PFNs for the entire VMA, not just the faulting page.
static vm_fault_t nvidia_fault(
#if !defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
    struct vm_area_struct *vma,
#endif
    struct vm_fault *vmf
)
{
#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
    // Newer kernels dropped the explicit vma argument; recover it from vmf.
    struct vm_area_struct *vma = vmf->vma;
#endif
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
    nv_linux_state_t *nvl = nvlfp->nvptr;
    nv_state_t *nv = NV_STATE_PTR(nvl);
    vm_fault_t ret = VM_FAULT_NOPAGE;

    NvU64 page;
    NvU64 num_pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT;
    NvU64 pfn_start =
        (nvlfp->mmap_context.mmap_start >> PAGE_SHIFT) + vma->vm_pgoff;

    // Mapping revocation is only supported for GPU mappings.
    if (NV_IS_CTL_DEVICE(nv))
    {
        return VM_FAULT_SIGBUS;
    }

    // Wake up GPU and reinstate mappings only if we are not in S3/S4 entry
    if (!down_read_trylock(&nv_system_pm_lock))
    {
        return VM_FAULT_NOPAGE;
    }

    down(&nvl->mmap_lock);

    // Wake up the GPU if it is not currently safe to mmap.
    if (!nvl->safe_to_mmap)
    {
        NV_STATUS status;

        if (!nvl->gpu_wakeup_callback_needed)
        {
            // GPU wakeup callback already scheduled.
            up(&nvl->mmap_lock);
            up_read(&nv_system_pm_lock);
            return VM_FAULT_NOPAGE;
        }

        /*
         * GPU wakeup cannot be completed directly in the fault handler due to the
         * inability to take the GPU lock while mmap_lock is held.
         */
        status = rm_schedule_gpu_wakeup(nvl->sp[NV_DEV_STACK_GPU_WAKEUP], nv);
        if (status != NV_OK)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: rm_schedule_gpu_wakeup failed: %x\n", status);
            up(&nvl->mmap_lock);
            up_read(&nv_system_pm_lock);
            return VM_FAULT_SIGBUS;
        }
        // Ensure that we do not schedule duplicate GPU wakeup callbacks.
        nvl->gpu_wakeup_callback_needed = NV_FALSE;

        up(&nvl->mmap_lock);
        up_read(&nv_system_pm_lock);
        return VM_FAULT_NOPAGE;
    }

    // Safe to mmap, map all pages in this VMA.
    for (page = 0; page < num_pages; page++)
    {
        NvU64 virt_addr = vma->vm_start + (page << PAGE_SHIFT);
        NvU64 pfn = pfn_start + page;

        ret = nv_insert_pfn(vma, virt_addr, pfn,
                            nvlfp->mmap_context.remap_prot_extra);
        if (ret != VM_FAULT_NOPAGE)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: nv_insert_pfn failed: %x\n", ret);
            break;
        }

        nvl->all_mappings_revoked = NV_FALSE;
    }
    up(&nvl->mmap_lock);
    up_read(&nv_system_pm_lock);

    return ret;
}
|
||||
|
||||
// VMA callbacks installed on every mapping created through the NVIDIA
// character devices (see nvidia_mmap_helper()).
static struct vm_operations_struct nv_vm_ops = {
    .open = nvidia_vma_open,
    .close = nvidia_vma_release,
    .fault = nvidia_fault,
    .access = nvidia_vma_access,
};
|
||||
|
||||
// Encode the requested caching attribute into *prot for the given memory
// type. Returns 0 on success, 1 if the combination is unsupported (callers
// treat this as an error or retry with a weaker cache type). A NULL 'prot'
// turns the call into a pure capability probe.
int nv_encode_caching(
    pgprot_t *prot,
    NvU32 cache_type,
    nv_memory_type_t memory_type
)
{
    pgprot_t tmp;

    if (prot == NULL)
    {
        tmp = __pgprot(0);
        prot = &tmp;
    }

    switch (cache_type)
    {
        case NV_MEMORY_UNCACHED_WEAK:
#if defined(NV_PGPROT_UNCACHED_WEAK)
            *prot = NV_PGPROT_UNCACHED_WEAK(*prot);
            break;
#endif
        // Deliberate fallthrough: without UC- support, UC-weak degrades to UC.
        case NV_MEMORY_UNCACHED:
            *prot = (memory_type == NV_MEMORY_TYPE_SYSTEM) ?
                    NV_PGPROT_UNCACHED(*prot) :
                    NV_PGPROT_UNCACHED_DEVICE(*prot);
            break;
#if defined(NV_PGPROT_WRITE_COMBINED) && \
    defined(NV_PGPROT_WRITE_COMBINED_DEVICE)
        case NV_MEMORY_WRITECOMBINED:
            if (NV_ALLOW_WRITE_COMBINING(memory_type))
            {
                *prot = (memory_type == NV_MEMORY_TYPE_FRAMEBUFFER) ?
                        NV_PGPROT_WRITE_COMBINED_DEVICE(*prot) :
                        NV_PGPROT_WRITE_COMBINED(*prot);
                break;
            }

            /*
             * If WC support is unavailable, we need to return an error
             * code to the caller, but need not print a warning.
             *
             * For frame buffer memory, callers are expected to use the
             * UC- memory type if we report WC as unsupported, which
             * translates to the effective memory type WC if a WC MTRR
             * exists or else UC.
             */
            return 1;
#endif
        case NV_MEMORY_CACHED:
            if (NV_ALLOW_CACHING(memory_type))
                break;
            // Intentional fallthrough.
        default:
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: cache type %d not supported for memory type %d!\n",
                cache_type, memory_type);
            return 1;
    }
    return 0;
}
|
||||
|
||||
int static nvidia_mmap_peer_io(
|
||||
struct vm_area_struct *vma,
|
||||
nv_alloc_t *at,
|
||||
NvU64 page_index,
|
||||
NvU64 pages
|
||||
)
|
||||
{
|
||||
int ret;
|
||||
NvU64 start;
|
||||
NvU64 size;
|
||||
|
||||
BUG_ON(!at->flags.contig);
|
||||
|
||||
start = at->page_table[page_index]->phys_addr;
|
||||
size = pages * PAGE_SIZE;
|
||||
|
||||
ret = nv_io_remap_page_range(vma, start, size, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Map 'pages' pages of an RM system-memory allocation, starting at
// 'page_index', into the user VMA. Takes a reference on the allocation that
// is paired with the drop in nvidia_vma_release(); the reference is released
// here again if any insertion fails. Returns 0 on success or -EAGAIN.
int static nvidia_mmap_sysmem(
    struct vm_area_struct *vma,
    nv_alloc_t *at,
    NvU64 page_index,
    NvU64 pages
)
{
    NvU64 j;
    int ret = 0;
    unsigned long start = 0;

    // Keep the allocation alive for as long as the mapping exists.
    NV_ATOMIC_INC(at->usage_count);

    start = vma->vm_start;
    for (j = page_index; j < (page_index + pages); j++)
    {
        /*
         * For PPC64LE build, nv_array_index_no_speculate() is not defined
         * therefore call nv_speculation_barrier().
         * When this definition is added, this platform check should be removed.
         */
#if !defined(NVCPU_PPC64LE)
        j = nv_array_index_no_speculate(j, (page_index + pages));
#else
        nv_speculation_barrier();
#endif

#if defined(NV_VGPU_KVM_BUILD)
        if (at->flags.guest)
        {
            ret = nv_remap_page_range(vma, start, at->page_table[j]->phys_addr,
                                      PAGE_SIZE, vma->vm_page_prot);
        }
        else
#endif
        {
            vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot, 0);
            ret = vm_insert_page(vma, start,
                                 NV_GET_PAGE_STRUCT(at->page_table[j]->phys_addr));
        }

        if (ret)
        {
            // Undo the reference taken above before reporting failure.
            NV_ATOMIC_DEC(at->usage_count);
            return -EAGAIN;
        }
        start += PAGE_SIZE;
    }

    return ret;
}
|
||||
|
||||
static int nvidia_mmap_numa(
|
||||
struct vm_area_struct *vma,
|
||||
const nv_alloc_mapping_context_t *mmap_context)
|
||||
{
|
||||
NvU64 start, addr;
|
||||
unsigned int pages;
|
||||
NvU64 i;
|
||||
|
||||
pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT;
|
||||
start = vma->vm_start;
|
||||
|
||||
if (mmap_context->num_pages < pages)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
// Needed for the linux kernel for mapping compound pages
|
||||
vma->vm_flags |= VM_MIXEDMAP;
|
||||
|
||||
for (i = 0, addr = mmap_context->page_array[0]; i < pages;
|
||||
addr = mmap_context->page_array[++i], start += PAGE_SIZE)
|
||||
{
|
||||
if (vm_insert_page(vma, start, NV_GET_PAGE_STRUCT(addr)) != 0)
|
||||
{
|
||||
return -EAGAIN;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Core mmap implementation shared by nvidia_mmap() and other entry points.
// Device nodes (nvidia#) map BAR memory (registers or framebuffer); the
// control node (nvidiactl) maps RM system-memory or peer-IO allocations
// described by the file's validated mmap context. Returns 0 on success, a
// negative errno on failure, or an NV_STATUS when the GPU state check fails.
int nvidia_mmap_helper(
    nv_state_t *nv,
    nv_linux_file_private_t *nvlfp,
    nvidia_stack_t *sp,
    struct vm_area_struct *vma,
    void *vm_priv
)
{
    NvU32 prot = 0;
    int ret;
    // NOTE(review): nvlfp is used to form this address before the NULL check
    // below — confirm callers never actually pass NULL.
    const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status;

    if (nvlfp == NULL)
        return NV_ERR_INVALID_ARGUMENT;

    /*
     * If mmap context is not valid on this file descriptor, this mapping wasn't
     * previously validated with the RM so it must be rejected.
     */
    if (!mmap_context->valid)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap\n");
        return -EINVAL;
    }

    NV_PRINT_VMA(NV_DBG_MEMINFO, vma);

    status = nv_check_gpu_state(nv);
    if (status != NV_OK)
    {
        NV_DEV_PRINTF(NV_DBG_INFO, nv,
                      "GPU is lost, skipping nvidia_mmap_helper\n");
        return status;
    }

    NV_VMA_PRIVATE(vma) = vm_priv;

    prot = mmap_context->prot;

    /*
     * Nvidia device node(nvidia#) maps device's BAR memory,
     * Nvidia control node(nvidiactrl) maps system memory.
     */
    if (!NV_IS_CTL_DEVICE(nv))
    {
        NvU32 remap_prot_extra = mmap_context->remap_prot_extra;
        NvU64 mmap_start = mmap_context->mmap_start;
        NvU64 mmap_length = mmap_context->mmap_size;
        NvU64 access_start = mmap_context->access_start;
        NvU64 access_len = mmap_context->access_size;

        // Pick page protections appropriate for the BAR region being mapped.
        if (IS_REG_OFFSET(nv, access_start, access_len))
        {
            if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED,
                        NV_MEMORY_TYPE_REGISTERS))
            {
                return -ENXIO;
            }
        }
        else if (IS_FB_OFFSET(nv, access_start, access_len))
        {
            if (IS_UD_OFFSET(nv, access_start, access_len))
            {
                if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED,
                            NV_MEMORY_TYPE_FRAMEBUFFER))
                {
                    return -ENXIO;
                }
            }
            else
            {
                // Prefer write-combined; fall back to UC- if WC is
                // unavailable (see nv_encode_caching()).
                if (nv_encode_caching(&vma->vm_page_prot,
                        rm_disable_iomap_wc() ? NV_MEMORY_UNCACHED : NV_MEMORY_WRITECOMBINED,
                        NV_MEMORY_TYPE_FRAMEBUFFER))
                {
                    if (nv_encode_caching(&vma->vm_page_prot,
                            NV_MEMORY_UNCACHED_WEAK, NV_MEMORY_TYPE_FRAMEBUFFER))
                    {
                        return -ENXIO;
                    }
                }
            }
        }

        down(&nvl->mmap_lock);
        // If it is not safe to mmap right now, the PTEs are left unpopulated
        // and nvidia_fault() fills them in on first access.
        if (nvl->safe_to_mmap)
        {
            nvl->all_mappings_revoked = NV_FALSE;

            //
            // This path is similar to the sysmem mapping code.
            // TODO: Refactor is needed as part of bug#2001704.
            // Use pfn_valid to determine whether the physical address has
            // backing struct page. This is used to isolate P8 from P9.
            //
            if ((nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE) &&
                !IS_REG_OFFSET(nv, access_start, access_len) &&
                (pfn_valid(PFN_DOWN(mmap_start))))
            {
                ret = nvidia_mmap_numa(vma, mmap_context);
                if (ret)
                {
                    up(&nvl->mmap_lock);
                    return ret;
                }
            }
            else
            {
                if (nv_io_remap_page_range(vma, mmap_start, mmap_length,
                        remap_prot_extra) != 0)
                {
                    up(&nvl->mmap_lock);
                    return -EAGAIN;
                }
            }
        }
        up(&nvl->mmap_lock);

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND;
    }
    else
    {
        nv_alloc_t *at;
        NvU64 page_index;
        NvU64 pages;
        NvU64 mmap_size;

        at = (nv_alloc_t *)mmap_context->alloc;
        page_index = mmap_context->page_index;
        mmap_size = NV_VMA_SIZE(vma);
        pages = mmap_size >> PAGE_SHIFT;

        // Reject VMAs that extend past the end of the allocation.
        if ((page_index + pages) > at->num_pages)
        {
            return -ERANGE;
        }

        /*
         * Callers that pass in non-NULL VMA private data must never reach this
         * code. They should be mapping on a non-control node.
         */
        BUG_ON(NV_VMA_PRIVATE(vma));

        if (at->flags.peer_io)
        {
            if (nv_encode_caching(&vma->vm_page_prot,
                                  at->cache_type,
                                  NV_MEMORY_TYPE_DEVICE_MMIO))
            {
                return -ENXIO;
            }

            /*
             * There is no need to keep 'peer IO at' alive till vma_release like
             * 'sysmem at' because there are no security concerns where a client
             * could free RM allocated sysmem before unmapping it. Hence, vm_ops
             * are NOP, and at->usage_count is never being used.
             */
            NV_VMA_PRIVATE(vma) = NULL;

            ret = nvidia_mmap_peer_io(vma, at, page_index, pages);

            BUG_ON(NV_VMA_PRIVATE(vma));
        }
        else
        {
            if (nv_encode_caching(&vma->vm_page_prot,
                                  at->cache_type,
                                  NV_MEMORY_TYPE_SYSTEM))
            {
                return -ENXIO;
            }

            // Stash the allocation so open/close can maintain usage_count.
            NV_VMA_PRIVATE(vma) = at;

            ret = nvidia_mmap_sysmem(vma, at, page_index, pages);
        }

        if (ret)
        {
            return ret;
        }

        NV_PRINT_AT(NV_DBG_MEMINFO, at);

        vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED);
        vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
    }

    // Honor the validated protection: strip write access when not granted.
    if ((prot & NV_PROTECT_WRITEABLE) == 0)
    {
        vma->vm_page_prot = NV_PGPROT_READ_ONLY(vma->vm_page_prot);
        vma->vm_flags &= ~VM_WRITE;
        vma->vm_flags &= ~VM_MAYWRITE;
    }

    vma->vm_ops = &nv_vm_ops;

    return 0;
}
|
||||
|
||||
//
// mmap() file-operation entry point for the NVIDIA character devices.
// Rejects fds used for RM object export, then serializes on the per-fd
// mmap stack and delegates the real mapping work to nvidia_mmap_helper().
// Returns 0 on success or a negative errno.
//
int nvidia_mmap(
    struct file *file,
    struct vm_area_struct *vma
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file);
    nv_state_t *nv = NV_STATE_PTR(nvl);
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
    nvidia_stack_t *sp = NULL;
    int status;

    //
    // Do not allow mmap operation if this is a fd into
    // which rm objects have been exported.
    //
    if (nvlfp->nvfp.handles != NULL)
    {
        return -EINVAL;
    }

    // fops_sp_lock guards the pre-allocated per-fd stack used below
    down(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_MMAP]);

    sp = nvlfp->fops_sp[NV_FOPS_STACK_INDEX_MMAP];

    status = nvidia_mmap_helper(nv, nvlfp, sp, vma, NULL);

    up(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_MMAP]);

    return status;
}
|
||||
|
||||
//
// Unmap every userspace mapping of this GPU across all open files and
// mark the device so later code knows mappings were revoked.
// Caller must hold nvl->mmap_lock (see nv_revoke_gpu_mappings()).
//
void
nv_revoke_gpu_mappings_locked(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    nv_linux_file_private_t *nvlfp;

    /* Revoke all mappings for every open file */
    list_for_each_entry (nvlfp, &nvl->open_files, entry)
    {
        // Zap the entire range of each file's address_space; faults on
        // these VMAs will be handled afresh by the driver's fault path.
        unmap_mapping_range(&nvlfp->mapping, 0, ~0, 1);
    }

    nvl->all_mappings_revoked = NV_TRUE;
}
|
||||
|
||||
//
// Locked wrapper around nv_revoke_gpu_mappings_locked(): takes
// nvl->mmap_lock, revokes all user mappings of this GPU, releases the
// lock. Not supported on the control device (no GPU mappings there).
//
NV_STATUS NV_API_CALL nv_revoke_gpu_mappings(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    // Mapping revocation is only supported for GPU mappings.
    if (NV_IS_CTL_DEVICE(nv))
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    down(&nvl->mmap_lock);

    nv_revoke_gpu_mappings_locked(nv);

    up(&nvl->mmap_lock);

    return NV_OK;
}
|
||||
|
||||
// Acquire the per-device mmap lock; pair with nv_release_mmap_lock().
// Exposes nvl->mmap_lock to core RM code that cannot see nv_linux_state_t.
void NV_API_CALL nv_acquire_mmap_lock(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    down(&nvl->mmap_lock);
}
|
||||
|
||||
// Release the per-device mmap lock taken by nv_acquire_mmap_lock().
void NV_API_CALL nv_release_mmap_lock(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    up(&nvl->mmap_lock);
}
|
||||
|
||||
// Report whether all user mappings of this GPU have been revoked
// (set by nv_revoke_gpu_mappings_locked()).
NvBool NV_API_CALL nv_get_all_mappings_revoked_locked(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    // Caller must hold nvl->mmap_lock for all decisions based on this
    return nvl->all_mappings_revoked;
}
|
||||
|
||||
//
// Update nvl->safe_to_mmap under nvl->mmap_lock. On a TRUE -> FALSE
// transition, arm the GPU wakeup callback so the fault handler can
// schedule a wakeup when a revoked mapping is next touched.
//
void NV_API_CALL nv_set_safe_to_mmap_locked(
    nv_state_t *nv,
    NvBool safe_to_mmap
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    // Caller must hold nvl->mmap_lock

    /*
     * If nvl->safe_to_mmap is transitioning from TRUE to FALSE, we expect to
     * need to schedule a GPU wakeup callback when we fault.
     *
     * nvl->gpu_wakeup_callback_needed will be set to FALSE in nvidia_fault()
     * after scheduling the GPU wakeup callback, preventing us from scheduling
     * duplicates.
     */
    if (!safe_to_mmap && nvl->safe_to_mmap)
    {
        nvl->gpu_wakeup_callback_needed = NV_TRUE;
    }

    nvl->safe_to_mmap = safe_to_mmap;
}
|
||||
146
kernel-open/nvidia/nv-modeset-interface.c
Normal file
146
kernel-open/nvidia/nv-modeset-interface.c
Normal file
@@ -0,0 +1,146 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nv-modeset-interface.h"
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
#include "nvstatus.h"
|
||||
#include "nv.h"
|
||||
|
||||
static const nvidia_modeset_callbacks_t *nv_modeset_callbacks;
|
||||
|
||||
// Allocate an RM altstack for nvidia-modeset; thin wrapper so the
// modeset driver never touches the kmem cache directly.
// Returns 0 on success, non-zero on allocation failure.
static int nvidia_modeset_rm_ops_alloc_stack(nvidia_stack_t **sp)
{
    return nv_kmem_cache_alloc_stack(sp);
}
|
||||
|
||||
// Free an RM altstack obtained via nvidia_modeset_rm_ops_alloc_stack().
// A NULL stack pointer is tolerated and ignored.
static void nvidia_modeset_rm_ops_free_stack(nvidia_stack_t *sp)
{
    if (sp == NULL)
    {
        return;
    }

    nv_kmem_cache_free_stack(sp);
}
|
||||
|
||||
static int nvidia_modeset_set_callbacks(const nvidia_modeset_callbacks_t *cb)
|
||||
{
|
||||
if ((nv_modeset_callbacks != NULL && cb != NULL) ||
|
||||
(nv_modeset_callbacks == NULL && cb == NULL))
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
nv_modeset_callbacks = cb;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Forward a suspend notification for the given GPU to the registered
// modeset callbacks; a no-op when no callbacks are installed.
void nvidia_modeset_suspend(NvU32 gpuId)
{
    const nvidia_modeset_callbacks_t *cb = nv_modeset_callbacks;

    if (cb != NULL)
    {
        cb->suspend(gpuId);
    }
}
|
||||
|
||||
// Forward a resume notification for the given GPU to the registered
// modeset callbacks; a no-op when no callbacks are installed.
void nvidia_modeset_resume(NvU32 gpuId)
{
    const nvidia_modeset_callbacks_t *cb = nv_modeset_callbacks;

    if (cb != NULL)
    {
        cb->resume(gpuId);
    }
}
|
||||
|
||||
//
// Fill gpu_info[] (NV_MAX_GPUS entries, caller-provided) with the id,
// PCI location and OS device pointer of every GPU in nv_linux_devices.
// Returns the number of entries written, or 0 if there are more GPUs
// than NV_MAX_GPUS (the array is considered invalid in that case).
//
static NvU32 nvidia_modeset_enumerate_gpus(nv_gpu_info_t *gpu_info)
{
    nv_linux_state_t *nvl;
    unsigned int count;

    // Hold the device-list lock across the whole walk so the list
    // cannot change while we copy from it.
    LOCK_NV_LINUX_DEVICES();

    count = 0;

    for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next)
    {
        nv_state_t *nv = NV_STATE_PTR(nvl);

        /*
         * The gpu_info[] array has NV_MAX_GPUS elements. Fail if there
         * are more GPUs than that.
         */
        if (count >= NV_MAX_GPUS) {
            nv_printf(NV_DBG_WARNINGS, "NVRM: More than %d GPUs found.",
                      NV_MAX_GPUS);
            count = 0;
            break;
        }

        gpu_info[count].gpu_id = nv->gpu_id;

        gpu_info[count].pci_info.domain   = nv->pci_info.domain;
        gpu_info[count].pci_info.bus      = nv->pci_info.bus;
        gpu_info[count].pci_info.slot     = nv->pci_info.slot;
        gpu_info[count].pci_info.function = nv->pci_info.function;

        gpu_info[count].os_device_ptr = nvl->dev;

        count++;
    }

    UNLOCK_NV_LINUX_DEVICES();

    return count;
}
|
||||
|
||||
//
// Hand the nvidia-modeset driver its operations table. The caller's
// version_string must match this driver's NV_VERSION_STRING exactly;
// on mismatch, the correct version is written back and NV_ERR_GENERIC
// is returned so the caller can report the conflict.
//
NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops)
{
    const nvidia_modeset_rm_ops_t local_rm_ops = {
        .version_string = NV_VERSION_STRING,
        .system_info    = {
            .allow_write_combining = NV_FALSE,
        },
        .alloc_stack    = nvidia_modeset_rm_ops_alloc_stack,
        .free_stack     = nvidia_modeset_rm_ops_free_stack,
        .enumerate_gpus = nvidia_modeset_enumerate_gpus,
        .open_gpu       = nvidia_dev_get,
        .close_gpu      = nvidia_dev_put,
        .op             = rm_kernel_rmapi_op, /* provided by nv-kernel.o */
        .set_callbacks  = nvidia_modeset_set_callbacks,
    };

    if (strcmp(rm_ops->version_string, NV_VERSION_STRING) != 0)
    {
        rm_ops->version_string = NV_VERSION_STRING;
        return NV_ERR_GENERIC;
    }

    *rm_ops = local_rm_ops;

    // Advertise write-combining support only if the platform allows it
    // for framebuffer memory.
    if (NV_ALLOW_WRITE_COMBINING(NV_MEMORY_TYPE_FRAMEBUFFER)) {
        rm_ops->system_info.allow_write_combining = NV_TRUE;
    }

    return NV_OK;
}
|
||||
|
||||
EXPORT_SYMBOL(nvidia_get_rm_ops);
|
||||
169
kernel-open/nvidia/nv-msi.c
Normal file
169
kernel-open/nvidia/nv-msi.c
Normal file
@@ -0,0 +1,169 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nv-msi.h"
|
||||
#include "nv-proto.h"
|
||||
|
||||
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
|
||||
//
// Try to switch the device from PCIe virtual-wire interrupts to MSI.
// On success, records the MSI vector in nv->interrupt_line, sets
// NV_FLAG_USES_MSI, and allocates the per-IRQ counter array. Any
// failure clears NV_FLAG_USES_MSI and the device falls back to
// virtual-wire interrupts; this function never fails fatally.
//
void NV_API_CALL nv_init_msi(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc = 0;

    rc = pci_enable_msi(nvl->pci_dev);
    if (rc == 0)
    {
        nv->interrupt_line = nvl->pci_dev->irq;
        nv->flags |= NV_FLAG_USES_MSI;
        // MSI (non-X) always uses a single vector.
        nvl->num_intr = 1;
        NV_KMALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * nvl->num_intr);

        if (nvl->irq_count == NULL)
        {
            // NOTE(review): MSI stays enabled in hardware here; only the
            // driver flag is cleared — presumably the teardown path
            // handles pci_disable_msi. TODO confirm.
            nv->flags &= ~NV_FLAG_USES_MSI;
            NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                "Failed to allocate counter for MSI entry; "
                "falling back to PCIe virtual-wire interrupts.\n");
        }
        else
        {
            memset(nvl->irq_count, 0, sizeof(nv_irq_count_info_t) * nvl->num_intr);
            nvl->current_num_irq_tracked = 0;
        }
    }
    else
    {
        nv->flags &= ~NV_FLAG_USES_MSI;
        // Only report a failure if the device actually has an IRQ line.
        if (nvl->pci_dev->irq != 0)
        {
            NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                "Failed to enable MSI; "
                "falling back to PCIe virtual-wire interrupts.\n");
        }
    }

    return;
}
|
||||
|
||||
//
// Try to switch the device to MSI-X. Allocates the bottom-half mutex,
// the msix_entry array (capped at NV_RM_MAX_MSIX_LINES vectors), and
// the per-IRQ counter array, then enables MSI-X via nv_pci_enable_msix().
// On success NV_FLAG_USES_MSIX is set; on any failure everything
// allocated here is released and the flag is cleared (the caller is
// expected to fall back to another interrupt mode).
//
void NV_API_CALL nv_init_msix(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int num_intr = 0;
    struct msix_entry *msix_entries;
    int rc = 0;
    int i;

    NV_SPIN_LOCK_INIT(&nvl->msix_isr_lock);

    rc = os_alloc_mutex(&nvl->msix_bh_mutex);
    if (rc != 0)
        goto failed;

    num_intr = nv_get_max_irq(nvl->pci_dev);

    if (num_intr > NV_RM_MAX_MSIX_LINES)
    {
        NV_DEV_PRINTF(NV_DBG_INFO, nv, "Reducing MSI-X count from %d to the "
            "driver-supported maximum %d.\n", num_intr, NV_RM_MAX_MSIX_LINES);
        num_intr = NV_RM_MAX_MSIX_LINES;
    }

    NV_KMALLOC(nvl->msix_entries, sizeof(struct msix_entry) * num_intr);
    if (nvl->msix_entries == NULL)
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate MSI-X entries.\n");
        goto failed;
    }

    // Each entry's index selects the MSI-X table slot to enable.
    for (i = 0, msix_entries = nvl->msix_entries; i < num_intr; i++, msix_entries++)
    {
        msix_entries->entry = i;
    }

    NV_KMALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr);

    if (nvl->irq_count == NULL)
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate counter for MSI-X entries.\n");
        goto failed;
    }
    else
    {
        memset(nvl->irq_count, 0, sizeof(nv_irq_count_info_t) * num_intr);
        nvl->current_num_irq_tracked = 0;
    }
    rc = nv_pci_enable_msix(nvl, num_intr);
    if (rc != NV_OK)
        goto failed;

    nv->flags |= NV_FLAG_USES_MSIX;
    return;

failed:
    // Unified unwind: release whatever was allocated before the failure.
    nv->flags &= ~NV_FLAG_USES_MSIX;

    if (nvl->msix_entries)
    {
        NV_KFREE(nvl->msix_entries, sizeof(struct msix_entry) * num_intr);
    }

    if (nvl->irq_count)
    {
        NV_KFREE(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr);
    }

    if (nvl->msix_bh_mutex)
    {
        os_free_mutex(nvl->msix_bh_mutex);
        nvl->msix_bh_mutex = NULL;
    }
    NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to enable MSI-X.\n");
}
|
||||
|
||||
//
// Request a threaded IRQ handler for every enabled MSI-X vector.
// Returns 0 when all vectors were requested, or the non-zero errno
// from the first failing request_threaded_irq(). On failure, every
// vector requested so far is released before returning, so the caller
// never has to unwind partial registrations.
//
NvS32 NV_API_CALL nv_request_msix_irq(nv_linux_state_t *nvl)
{
    int i;
    int j;
    struct msix_entry *msix_entries;
    int rc = NV_ERR_INVALID_ARGUMENT;
    nv_state_t *nv = NV_STATE_PTR(nvl);

    for (i = 0, msix_entries = nvl->msix_entries; i < nvl->num_intr;
         i++, msix_entries++)
    {
        rc = request_threaded_irq(msix_entries->vector, nvidia_isr_msix,
                                  nvidia_isr_msix_kthread_bh, nv_default_irq_flags(nv),
                                  nv_device_name, (void *)nvl);
        if (rc)
        {
            // Unwind: free each vector obtained in prior iterations.
            // BUGFIX: must index by j (previously freed entry [i] — the
            // failed vector — i times, leaking vectors 0..i-1).
            for( j = 0; j < i; j++)
            {
                free_irq(nvl->msix_entries[j].vector, (void *)nvl);
            }
            break;
        }
    }

    return rc;
}
|
||||
#endif
|
||||
956
kernel-open/nvidia/nv-p2p.c
Normal file
956
kernel-open/nvidia/nv-p2p.c
Normal file
@@ -0,0 +1,956 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2011-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
#include "nv-ibmnpu.h"
|
||||
#include "nv-rsync.h"
|
||||
|
||||
#include "nv-p2p.h"
|
||||
#include "rmp2pdefines.h"
|
||||
|
||||
typedef struct nv_p2p_dma_mapping {
|
||||
struct list_head list_node;
|
||||
struct nvidia_p2p_dma_mapping *dma_mapping;
|
||||
} nv_p2p_dma_mapping_t;
|
||||
|
||||
typedef struct nv_p2p_mem_info {
|
||||
void (*free_callback)(void *data);
|
||||
void *data;
|
||||
struct nvidia_p2p_page_table page_table;
|
||||
struct {
|
||||
struct list_head list_head;
|
||||
struct semaphore lock;
|
||||
} dma_mapping_list;
|
||||
NvBool bPersistent;
|
||||
void *private;
|
||||
} nv_p2p_mem_info_t;
|
||||
|
||||
int nvidia_p2p_cap_persistent_pages = 1;
|
||||
EXPORT_SYMBOL(nvidia_p2p_cap_persistent_pages);
|
||||
|
||||
// declared and created in nv.c
|
||||
extern void *nvidia_p2p_page_t_cache;
|
||||
|
||||
static struct nvidia_status_mapping {
|
||||
NV_STATUS status;
|
||||
int error;
|
||||
} nvidia_status_mappings[] = {
|
||||
{ NV_ERR_GENERIC, -EIO },
|
||||
{ NV_ERR_INSUFFICIENT_RESOURCES, -ENOMEM },
|
||||
{ NV_ERR_NO_MEMORY, -ENOMEM },
|
||||
{ NV_ERR_INVALID_ARGUMENT, -EINVAL },
|
||||
{ NV_ERR_INVALID_OBJECT_HANDLE, -EINVAL },
|
||||
{ NV_ERR_INVALID_STATE, -EIO },
|
||||
{ NV_ERR_NOT_SUPPORTED, -ENOTSUPP },
|
||||
{ NV_ERR_OBJECT_NOT_FOUND, -EINVAL },
|
||||
{ NV_ERR_STATE_IN_USE, -EBUSY },
|
||||
{ NV_ERR_GPU_UUID_NOT_FOUND, -ENODEV },
|
||||
{ NV_OK, 0 },
|
||||
};
|
||||
|
||||
#define NVIDIA_STATUS_MAPPINGS \
|
||||
(sizeof(nvidia_status_mappings) / sizeof(struct nvidia_status_mapping))
|
||||
|
||||
//
// Translate an NV_STATUS code into the corresponding negative errno
// using the nvidia_status_mappings table. Codes without a table entry
// degrade to the generic -EIO.
//
static int nvidia_p2p_map_status(NV_STATUS status)
{
    uint8_t i;

    for (i = 0; i < NVIDIA_STATUS_MAPPINGS; i++)
    {
        if (nvidia_status_mappings[i].status == status)
        {
            return nvidia_status_mappings[i].error;
        }
    }

    return -EIO;
}
|
||||
|
||||
static NvU32 nvidia_p2p_page_size_mappings[NVIDIA_P2P_PAGE_SIZE_COUNT] = {
|
||||
NVRM_P2P_PAGESIZE_SMALL_4K, NVRM_P2P_PAGESIZE_BIG_64K, NVRM_P2P_PAGESIZE_BIG_128K
|
||||
};
|
||||
|
||||
//
// Map an RM page size (NVRM_P2P_PAGESIZE_*) to its index in the
// nvidia_p2p_page_size_mappings table. Writes the index through
// page_size_index and returns NV_OK, or returns NV_ERR_GENERIC
// (leaving the output untouched) for an unknown page size.
//
static NV_STATUS nvidia_p2p_map_page_size(NvU32 page_size, NvU32 *page_size_index)
{
    NvU32 idx;

    for (idx = 0; idx < NVIDIA_P2P_PAGE_SIZE_COUNT; idx++)
    {
        if (nvidia_p2p_page_size_mappings[idx] == page_size)
        {
            *page_size_index = idx;
            return NV_OK;
        }
    }

    return NV_ERR_GENERIC;
}
|
||||
|
||||
//
// Append dma_mapping to mem_info's DMA-mapping list (a small wrapper
// node is allocated to link it). Returns NV_OK, or the os_alloc_mem
// status if the node allocation fails. The list lock serializes
// against nv_p2p_remove_dma_mapping().
//
static NV_STATUS nv_p2p_insert_dma_mapping(
    struct nv_p2p_mem_info *mem_info,
    struct nvidia_p2p_dma_mapping *dma_mapping
)
{
    NV_STATUS status;
    struct nv_p2p_dma_mapping *node;

    status = os_alloc_mem((void**)&node, sizeof(*node));
    if (status != NV_OK)
    {
        return status;
    }

    down(&mem_info->dma_mapping_list.lock);

    node->dma_mapping = dma_mapping;
    list_add_tail(&node->list_node, &mem_info->dma_mapping_list.list_head);

    up(&mem_info->dma_mapping_list.lock);

    return NV_OK;
}
|
||||
|
||||
//
// Unlink one DMA mapping from mem_info's list and free its wrapper
// node. If dma_mapping is NULL the first entry (if any) is removed;
// otherwise the matching entry is removed. Returns the removed
// nvidia_p2p_dma_mapping, or NULL if nothing was found. The caller
// owns (and must free) the returned mapping.
//
static struct nvidia_p2p_dma_mapping* nv_p2p_remove_dma_mapping(
    struct nv_p2p_mem_info *mem_info,
    struct nvidia_p2p_dma_mapping *dma_mapping
)
{
    struct nv_p2p_dma_mapping *cur;
    struct nvidia_p2p_dma_mapping *ret_dma_mapping = NULL;

    down(&mem_info->dma_mapping_list.lock);

    list_for_each_entry(cur, &mem_info->dma_mapping_list.list_head, list_node)
    {
        if (dma_mapping == NULL || dma_mapping == cur->dma_mapping)
        {
            ret_dma_mapping = cur->dma_mapping;
            list_del(&cur->list_node);
            os_free_mem(cur);
            break;
        }
    }

    up(&mem_info->dma_mapping_list.lock);

    return ret_dma_mapping;
}
|
||||
|
||||
//
// Tear down one peer DMA mapping: undo the DMA mappings against the
// peer PCI device, then free the address array and the mapping struct.
// A non-NULL 'private' marks a mapping created through nv_dma_map_alloc
// (single-page granularity); otherwise each entry was mapped with
// nv_dma_map_peer and is unmapped individually.
//
static void nv_p2p_free_dma_mapping(
    struct nvidia_p2p_dma_mapping *dma_mapping
)
{
    nv_dma_device_t peer_dma_dev = {{ 0 }};
    NvU32 page_size;
    NV_STATUS status;
    NvU32 i;

    peer_dma_dev.dev = &dma_mapping->pci_dev->dev;
    peer_dma_dev.addressable_range.limit = dma_mapping->pci_dev->dma_mask;

    page_size = nvidia_p2p_page_size_mappings[dma_mapping->page_size_type];

    if (dma_mapping->private != NULL)
    {
        // The alloc-backed path only supports OS-page-sized entries.
        WARN_ON(page_size != PAGE_SIZE);

        status = nv_dma_unmap_alloc(&peer_dma_dev,
                                    dma_mapping->entries,
                                    dma_mapping->dma_addresses,
                                    &dma_mapping->private);
        WARN_ON(status != NV_OK);
    }
    else
    {
        for (i = 0; i < dma_mapping->entries; i++)
        {
            nv_dma_unmap_peer(&peer_dma_dev, page_size / PAGE_SIZE,
                              dma_mapping->dma_addresses[i]);
        }
    }

    os_free_mem(dma_mapping->dma_addresses);

    os_free_mem(dma_mapping);
}
|
||||
|
||||
//
// Free a page table and its owning nv_p2p_mem_info: first tear down
// every DMA mapping still linked to it, then release the per-page
// cache objects, the UUID, the page array, and finally the mem_info
// itself (the page_table is embedded in mem_info, so this frees both).
//
static void nv_p2p_free_page_table(
    struct nvidia_p2p_page_table *page_table
)
{
    NvU32 i;
    struct nvidia_p2p_dma_mapping *dma_mapping;
    struct nv_p2p_mem_info *mem_info = NULL;

    // Recover the owning mem_info from the embedded page_table.
    mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table);

    // Drain the DMA-mapping list, freeing each mapping as it comes off.
    dma_mapping = nv_p2p_remove_dma_mapping(mem_info, NULL);
    while (dma_mapping != NULL)
    {
        nv_p2p_free_dma_mapping(dma_mapping);

        dma_mapping = nv_p2p_remove_dma_mapping(mem_info, NULL);
    }

    for (i = 0; i < page_table->entries; i++)
    {
        NV_KMEM_CACHE_FREE(page_table->pages[i], nvidia_p2p_page_t_cache);
    }

    if (page_table->gpu_uuid != NULL)
    {
        os_free_mem(page_table->gpu_uuid);
    }

    if (page_table->pages != NULL)
    {
        os_free_mem(page_table->pages);
    }

    os_free_mem(mem_info);
}
|
||||
|
||||
//
// Release a page table previously obtained from RM, choosing the
// persistent or token-based put path based on mem_info->bPersistent.
// On success (or when a non-persistent table was already unlinked by
// RM's teardown) the page table is freed and *page_table is NULLed.
//
static NV_STATUS nv_p2p_put_pages(
    nvidia_stack_t * sp,
    uint64_t p2p_token,
    uint32_t va_space,
    uint64_t virtual_address,
    struct nvidia_p2p_page_table **page_table
)
{
    NV_STATUS status;
    struct nv_p2p_mem_info *mem_info = NULL;

    mem_info = container_of(*page_table, nv_p2p_mem_info_t, page_table);

    /*
     * rm_p2p_put_pages returns NV_OK if the page_table was found and
     * got unlinked from the RM's tracker (atomically). This ensures that
     * RM's tear-down path does not race with this path.
     *
     * rm_p2p_put_pages returns NV_ERR_OBJECT_NOT_FOUND if the page_table
     * was already unlinked.
     */
    if (mem_info->bPersistent)
    {
        status = rm_p2p_put_pages_persistent(sp, mem_info->private, *page_table);
    }
    else
    {
        status = rm_p2p_put_pages(sp, p2p_token, va_space,
                                  virtual_address, *page_table);
    }

    if (status == NV_OK)
    {
        nv_p2p_free_page_table(*page_table);
        *page_table = NULL;
    }
    else if (!mem_info->bPersistent && (status == NV_ERR_OBJECT_NOT_FOUND))
    {
        // Already unlinked by RM teardown: RM owns (or has freed) the
        // table, so treat this as success without freeing it here.
        status = NV_OK;
        *page_table = NULL;
    }
    else
    {
        WARN_ON(status != NV_OK);
    }

    return status;
}
|
||||
|
||||
//
// RM cleanup hook: 'data' is the nvidia_p2p_page_table pointer that was
// handed to RM, and this releases it via nv_p2p_free_page_table().
// A NULL pointer indicates a caller bug and is warned about and ignored.
//
void NV_API_CALL nv_p2p_free_platform_data(
    void *data
)
{
    if (WARN_ON(data == NULL))
    {
        return;
    }

    nv_p2p_free_page_table((struct nvidia_p2p_page_table*)data);
}
|
||||
|
||||
//
// Legacy P2P API entry point; this interface is not implemented and
// always returns -ENOTSUPP. Kept so third-party modules that resolve
// the symbol continue to load.
//
int nvidia_p2p_init_mapping(
    uint64_t p2p_token,
    struct nvidia_p2p_params *params,
    void (*destroy_callback)(void *data),
    void *data
)
{
    return -ENOTSUPP;
}
|
||||
|
||||
EXPORT_SYMBOL(nvidia_p2p_init_mapping);
|
||||
|
||||
// Legacy P2P API counterpart of nvidia_p2p_init_mapping; likewise
// unimplemented and always returns -ENOTSUPP.
int nvidia_p2p_destroy_mapping(uint64_t p2p_token)
{
    return -ENOTSUPP;
}
|
||||
|
||||
EXPORT_SYMBOL(nvidia_p2p_destroy_mapping);
|
||||
|
||||
//
// Free callback registered with RM for non-persistent mappings: invoke
// the client's free_callback first, then release the page table (and
// with it the mem_info) via nv_p2p_free_platform_data().
//
static void nv_p2p_mem_info_free_callback(void *data)
{
    nv_p2p_mem_info_t *mem_info = (nv_p2p_mem_info_t*) data;

    mem_info->free_callback(mem_info->data);

    nv_p2p_free_platform_data(&mem_info->page_table);
}
|
||||
|
||||
int nvidia_p2p_get_pages(
|
||||
uint64_t p2p_token,
|
||||
uint32_t va_space,
|
||||
uint64_t virtual_address,
|
||||
uint64_t length,
|
||||
struct nvidia_p2p_page_table **page_table,
|
||||
void (*free_callback)(void * data),
|
||||
void *data
|
||||
)
|
||||
{
|
||||
NV_STATUS status;
|
||||
nvidia_stack_t *sp = NULL;
|
||||
struct nvidia_p2p_page *page;
|
||||
struct nv_p2p_mem_info *mem_info = NULL;
|
||||
NvU32 entries;
|
||||
NvU32 *wreqmb_h = NULL;
|
||||
NvU32 *rreqmb_h = NULL;
|
||||
NvU64 *physical_addresses = NULL;
|
||||
NvU32 page_count;
|
||||
NvU32 i = 0;
|
||||
NvBool bGetPages = NV_FALSE;
|
||||
NvBool bGetUuid = NV_FALSE;
|
||||
NvU32 page_size = NVRM_P2P_PAGESIZE_BIG_64K;
|
||||
NvU32 page_size_index;
|
||||
NvU64 temp_length;
|
||||
NvU8 *gpu_uuid = NULL;
|
||||
NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0};
|
||||
int rc;
|
||||
|
||||
rc = nv_kmem_cache_alloc_stack(&sp);
|
||||
if (rc != 0)
|
||||
{
|
||||
return rc;
|
||||
}
|
||||
|
||||
*page_table = NULL;
|
||||
status = os_alloc_mem((void **)&mem_info, sizeof(*mem_info));
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
|
||||
memset(mem_info, 0, sizeof(*mem_info));
|
||||
|
||||
INIT_LIST_HEAD(&mem_info->dma_mapping_list.list_head);
|
||||
NV_INIT_MUTEX(&mem_info->dma_mapping_list.lock);
|
||||
|
||||
*page_table = &(mem_info->page_table);
|
||||
|
||||
mem_info->bPersistent = (free_callback == NULL);
|
||||
|
||||
//asign length to temporary variable since do_div macro does in-place division
|
||||
temp_length = length;
|
||||
do_div(temp_length, page_size);
|
||||
page_count = temp_length;
|
||||
|
||||
if (length & (page_size - 1))
|
||||
{
|
||||
page_count++;
|
||||
}
|
||||
|
||||
status = os_alloc_mem((void **)&physical_addresses,
|
||||
(page_count * sizeof(NvU64)));
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
status = os_alloc_mem((void **)&wreqmb_h, (page_count * sizeof(NvU32)));
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
status = os_alloc_mem((void **)&rreqmb_h, (page_count * sizeof(NvU32)));
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
|
||||
if (mem_info->bPersistent)
|
||||
{
|
||||
void *gpu_info = NULL;
|
||||
|
||||
if ((p2p_token != 0) || (va_space != 0))
|
||||
{
|
||||
status = -ENOTSUPP;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
status = rm_p2p_get_gpu_info(sp, virtual_address, length, &gpu_uuid, &gpu_info);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
|
||||
rc = nvidia_dev_get_uuid(gpu_uuid, sp);
|
||||
if (rc != 0)
|
||||
{
|
||||
status = NV_ERR_GPU_UUID_NOT_FOUND;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
os_mem_copy(uuid, gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN);
|
||||
|
||||
bGetUuid = NV_TRUE;
|
||||
|
||||
status = rm_p2p_get_pages_persistent(sp, virtual_address, length, &mem_info->private,
|
||||
physical_addresses, &entries, *page_table, gpu_info);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// Get regular old-style, non-persistent mappings
|
||||
status = rm_p2p_get_pages(sp, p2p_token, va_space,
|
||||
virtual_address, length, physical_addresses, wreqmb_h,
|
||||
rreqmb_h, &entries, &gpu_uuid, *page_table);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
}
|
||||
|
||||
bGetPages = NV_TRUE;
|
||||
(*page_table)->gpu_uuid = gpu_uuid;
|
||||
|
||||
status = os_alloc_mem((void *)&(*page_table)->pages,
|
||||
(entries * sizeof(page)));
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
|
||||
(*page_table)->version = NVIDIA_P2P_PAGE_TABLE_VERSION;
|
||||
|
||||
for (i = 0; i < entries; i++)
|
||||
{
|
||||
page = NV_KMEM_CACHE_ALLOC(nvidia_p2p_page_t_cache);
|
||||
if (page == NULL)
|
||||
{
|
||||
status = NV_ERR_NO_MEMORY;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
memset(page, 0, sizeof(*page));
|
||||
|
||||
page->physical_address = physical_addresses[i];
|
||||
page->registers.fermi.wreqmb_h = wreqmb_h[i];
|
||||
page->registers.fermi.rreqmb_h = rreqmb_h[i];
|
||||
|
||||
(*page_table)->pages[i] = page;
|
||||
(*page_table)->entries++;
|
||||
}
|
||||
|
||||
status = nvidia_p2p_map_page_size(page_size, &page_size_index);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
|
||||
(*page_table)->page_size = page_size_index;
|
||||
|
||||
os_free_mem(physical_addresses);
|
||||
os_free_mem(wreqmb_h);
|
||||
os_free_mem(rreqmb_h);
|
||||
|
||||
if (free_callback != NULL)
|
||||
{
|
||||
mem_info->free_callback = free_callback;
|
||||
mem_info->data = data;
|
||||
|
||||
status = rm_p2p_register_callback(sp, p2p_token, virtual_address, length,
|
||||
*page_table, nv_p2p_mem_info_free_callback, mem_info);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
}
|
||||
|
||||
nv_kmem_cache_free_stack(sp);
|
||||
|
||||
return nvidia_p2p_map_status(status);
|
||||
|
||||
failed:
|
||||
if (physical_addresses != NULL)
|
||||
{
|
||||
os_free_mem(physical_addresses);
|
||||
}
|
||||
if (wreqmb_h != NULL)
|
||||
{
|
||||
os_free_mem(wreqmb_h);
|
||||
}
|
||||
if (rreqmb_h != NULL)
|
||||
{
|
||||
os_free_mem(rreqmb_h);
|
||||
}
|
||||
|
||||
if (bGetPages)
|
||||
{
|
||||
(void)nv_p2p_put_pages(sp, p2p_token, va_space,
|
||||
virtual_address, page_table);
|
||||
}
|
||||
|
||||
if (bGetUuid)
|
||||
{
|
||||
nvidia_dev_put_uuid(uuid, sp);
|
||||
}
|
||||
|
||||
if (*page_table != NULL)
|
||||
{
|
||||
nv_p2p_free_page_table(*page_table);
|
||||
}
|
||||
|
||||
nv_kmem_cache_free_stack(sp);
|
||||
|
||||
return nvidia_p2p_map_status(status);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(nvidia_p2p_get_pages);
|
||||
|
||||
/*
 * This function is a no-op, but is left in place (for now), in order to allow
 * third-party callers to build and run without errors or warnings. This is OK,
 * because the missing functionality is provided by nv_p2p_free_platform_data,
 * which is being called as part of the RM's cleanup path.
 */
int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table)
{
    // Intentionally empty; always reports success.
    return 0;
}
|
||||
|
||||
EXPORT_SYMBOL(nvidia_p2p_free_page_table);
|
||||
|
||||
int nvidia_p2p_put_pages(
|
||||
uint64_t p2p_token,
|
||||
uint32_t va_space,
|
||||
uint64_t virtual_address,
|
||||
struct nvidia_p2p_page_table *page_table
|
||||
)
|
||||
{
|
||||
struct nv_p2p_mem_info *mem_info = NULL;
|
||||
NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0};
|
||||
NV_STATUS status;
|
||||
nvidia_stack_t *sp = NULL;
|
||||
int rc = 0;
|
||||
|
||||
os_mem_copy(uuid, page_table->gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN);
|
||||
|
||||
mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table);
|
||||
|
||||
rc = nv_kmem_cache_alloc_stack(&sp);
|
||||
if (rc != 0)
|
||||
{
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
status = nv_p2p_put_pages(sp, p2p_token, va_space,
|
||||
virtual_address, &page_table);
|
||||
|
||||
if (mem_info->bPersistent)
|
||||
{
|
||||
nvidia_dev_put_uuid(uuid, sp);
|
||||
}
|
||||
|
||||
nv_kmem_cache_free_stack(sp);
|
||||
|
||||
return nvidia_p2p_map_status(status);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(nvidia_p2p_put_pages);
|
||||
|
||||
int nvidia_p2p_dma_map_pages(
|
||||
struct pci_dev *peer,
|
||||
struct nvidia_p2p_page_table *page_table,
|
||||
struct nvidia_p2p_dma_mapping **dma_mapping
|
||||
)
|
||||
{
|
||||
NV_STATUS status;
|
||||
nv_dma_device_t peer_dma_dev = {{ 0 }};
|
||||
nvidia_stack_t *sp = NULL;
|
||||
NvU64 *dma_addresses = NULL;
|
||||
NvU32 page_count;
|
||||
NvU32 page_size;
|
||||
enum nvidia_p2p_page_size_type page_size_type;
|
||||
struct nv_p2p_mem_info *mem_info = NULL;
|
||||
NvU32 i;
|
||||
void *priv;
|
||||
int rc;
|
||||
|
||||
if (peer == NULL || page_table == NULL || dma_mapping == NULL ||
|
||||
page_table->gpu_uuid == NULL)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table);
|
||||
|
||||
rc = nv_kmem_cache_alloc_stack(&sp);
|
||||
if (rc != 0)
|
||||
{
|
||||
return rc;
|
||||
}
|
||||
|
||||
*dma_mapping = NULL;
|
||||
status = os_alloc_mem((void **)dma_mapping, sizeof(**dma_mapping));
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
memset(*dma_mapping, 0, sizeof(**dma_mapping));
|
||||
|
||||
page_count = page_table->entries;
|
||||
|
||||
status = os_alloc_mem((void **)&dma_addresses,
|
||||
(page_count * sizeof(NvU64)));
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
|
||||
page_size_type = page_table->page_size;
|
||||
|
||||
BUG_ON((page_size_type <= NVIDIA_P2P_PAGE_SIZE_4KB) ||
|
||||
(page_size_type >= NVIDIA_P2P_PAGE_SIZE_COUNT));
|
||||
|
||||
peer_dma_dev.dev = &peer->dev;
|
||||
peer_dma_dev.addressable_range.limit = peer->dma_mask;
|
||||
|
||||
page_size = nvidia_p2p_page_size_mappings[page_size_type];
|
||||
|
||||
for (i = 0; i < page_count; i++)
|
||||
{
|
||||
dma_addresses[i] = page_table->pages[i]->physical_address;
|
||||
}
|
||||
|
||||
status = rm_p2p_dma_map_pages(sp, &peer_dma_dev,
|
||||
page_table->gpu_uuid, page_size, page_count, dma_addresses, &priv);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed;
|
||||
}
|
||||
|
||||
(*dma_mapping)->version = NVIDIA_P2P_DMA_MAPPING_VERSION;
|
||||
(*dma_mapping)->page_size_type = page_size_type;
|
||||
(*dma_mapping)->entries = page_count;
|
||||
(*dma_mapping)->dma_addresses = dma_addresses;
|
||||
(*dma_mapping)->private = priv;
|
||||
(*dma_mapping)->pci_dev = peer;
|
||||
|
||||
/*
|
||||
* All success, it is safe to insert dma_mapping now.
|
||||
*/
|
||||
status = nv_p2p_insert_dma_mapping(mem_info, *dma_mapping);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
goto failed_insert;
|
||||
}
|
||||
|
||||
nv_kmem_cache_free_stack(sp);
|
||||
|
||||
return 0;
|
||||
|
||||
failed_insert:
|
||||
nv_p2p_free_dma_mapping(*dma_mapping);
|
||||
dma_addresses = NULL;
|
||||
*dma_mapping = NULL;
|
||||
|
||||
failed:
|
||||
if (dma_addresses != NULL)
|
||||
{
|
||||
os_free_mem(dma_addresses);
|
||||
}
|
||||
|
||||
if (*dma_mapping != NULL)
|
||||
{
|
||||
os_free_mem(*dma_mapping);
|
||||
*dma_mapping = NULL;
|
||||
}
|
||||
|
||||
nv_kmem_cache_free_stack(sp);
|
||||
|
||||
return nvidia_p2p_map_status(status);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(nvidia_p2p_dma_map_pages);
|
||||
|
||||
int nvidia_p2p_dma_unmap_pages(
|
||||
struct pci_dev *peer,
|
||||
struct nvidia_p2p_page_table *page_table,
|
||||
struct nvidia_p2p_dma_mapping *dma_mapping
|
||||
)
|
||||
{
|
||||
struct nv_p2p_mem_info *mem_info = NULL;
|
||||
|
||||
if (peer == NULL || dma_mapping == NULL || page_table == NULL)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table);
|
||||
|
||||
/*
|
||||
* nv_p2p_remove_dma_mapping returns dma_mapping if the dma_mapping was
|
||||
* found and got unlinked from the mem_info->dma_mapping_list (atomically).
|
||||
* This ensures that the RM's tear-down path does not race with this path.
|
||||
*
|
||||
* nv_p2p_remove_dma_mappings returns NULL if the dma_mapping was already
|
||||
* unlinked.
|
||||
*/
|
||||
if (nv_p2p_remove_dma_mapping(mem_info, dma_mapping) == NULL)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
WARN_ON(peer != dma_mapping->pci_dev);
|
||||
|
||||
BUG_ON((dma_mapping->page_size_type <= NVIDIA_P2P_PAGE_SIZE_4KB) ||
|
||||
(dma_mapping->page_size_type >= NVIDIA_P2P_PAGE_SIZE_COUNT));
|
||||
|
||||
nv_p2p_free_dma_mapping(dma_mapping);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages);
|
||||
|
||||
/*
 * This function is intentionally a no-op; it remains so that third-party
 * callers keep building and running without errors or warnings. The
 * functionality it appears to omit is provided by nv_p2p_free_platform_data,
 * which runs as part of the RM's cleanup path.
 */
int nvidia_p2p_free_dma_mapping(
    struct nvidia_p2p_dma_mapping *dma_mapping
)
{
    return 0;
}

EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping);
|
||||
|
||||
int nvidia_p2p_register_rsync_driver(
|
||||
nvidia_p2p_rsync_driver_t *driver,
|
||||
void *data
|
||||
)
|
||||
{
|
||||
if (driver == NULL)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(driver))
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (driver->get_relaxed_ordering_mode == NULL ||
|
||||
driver->put_relaxed_ordering_mode == NULL ||
|
||||
driver->wait_for_rsync == NULL)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return nv_register_rsync_driver(driver->get_relaxed_ordering_mode,
|
||||
driver->put_relaxed_ordering_mode,
|
||||
driver->wait_for_rsync, data);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(nvidia_p2p_register_rsync_driver);
|
||||
|
||||
void nvidia_p2p_unregister_rsync_driver(
|
||||
nvidia_p2p_rsync_driver_t *driver,
|
||||
void *data
|
||||
)
|
||||
{
|
||||
if (driver == NULL)
|
||||
{
|
||||
WARN_ON(1);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(driver))
|
||||
{
|
||||
WARN_ON(1);
|
||||
return;
|
||||
}
|
||||
|
||||
if (driver->get_relaxed_ordering_mode == NULL ||
|
||||
driver->put_relaxed_ordering_mode == NULL ||
|
||||
driver->wait_for_rsync == NULL)
|
||||
{
|
||||
WARN_ON(1);
|
||||
return;
|
||||
}
|
||||
|
||||
nv_unregister_rsync_driver(driver->get_relaxed_ordering_mode,
|
||||
driver->put_relaxed_ordering_mode,
|
||||
driver->wait_for_rsync, data);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(nvidia_p2p_unregister_rsync_driver);
|
||||
|
||||
int nvidia_p2p_get_rsync_registers(
|
||||
nvidia_p2p_rsync_reg_info_t **reg_info
|
||||
)
|
||||
{
|
||||
nv_linux_state_t *nvl;
|
||||
nv_state_t *nv;
|
||||
NV_STATUS status;
|
||||
void *ptr = NULL;
|
||||
NvU64 addr;
|
||||
NvU64 size;
|
||||
struct pci_dev *ibmnpu = NULL;
|
||||
NvU32 index = 0;
|
||||
NvU32 count = 0;
|
||||
nvidia_p2p_rsync_reg_info_t *info = NULL;
|
||||
nvidia_p2p_rsync_reg_t *regs = NULL;
|
||||
|
||||
if (reg_info == NULL)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
status = os_alloc_mem((void**)&info, sizeof(*info));
|
||||
if (status != NV_OK)
|
||||
{
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset(info, 0, sizeof(*info));
|
||||
|
||||
info->version = NVIDIA_P2P_RSYNC_REG_INFO_VERSION;
|
||||
|
||||
LOCK_NV_LINUX_DEVICES();
|
||||
|
||||
for (nvl = nv_linux_devices; nvl; nvl = nvl->next)
|
||||
{
|
||||
count++;
|
||||
}
|
||||
|
||||
status = os_alloc_mem((void**)®s, (count * sizeof(*regs)));
|
||||
if (status != NV_OK)
|
||||
{
|
||||
nvidia_p2p_put_rsync_registers(info);
|
||||
UNLOCK_NV_LINUX_DEVICES();
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (nvl = nv_linux_devices; nvl; nvl = nvl->next)
|
||||
{
|
||||
nv = NV_STATE_PTR(nvl);
|
||||
|
||||
addr = 0;
|
||||
size = 0;
|
||||
|
||||
status = nv_get_ibmnpu_genreg_info(nv, &addr, &size, (void**)&ibmnpu);
|
||||
if (status != NV_OK)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
ptr = nv_ioremap_nocache(addr, size);
|
||||
if (ptr == NULL)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
regs[index].ptr = ptr;
|
||||
regs[index].size = size;
|
||||
regs[index].gpu = nvl->pci_dev;
|
||||
regs[index].ibmnpu = ibmnpu;
|
||||
regs[index].cluster_id = 0;
|
||||
regs[index].socket_id = nv_get_ibmnpu_chip_id(nv);
|
||||
|
||||
index++;
|
||||
}
|
||||
|
||||
UNLOCK_NV_LINUX_DEVICES();
|
||||
|
||||
info->regs = regs;
|
||||
info->entries = index;
|
||||
|
||||
if (info->entries == 0)
|
||||
{
|
||||
nvidia_p2p_put_rsync_registers(info);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
*reg_info = info;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers);
|
||||
|
||||
void nvidia_p2p_put_rsync_registers(
|
||||
nvidia_p2p_rsync_reg_info_t *reg_info
|
||||
)
|
||||
{
|
||||
NvU32 i;
|
||||
nvidia_p2p_rsync_reg_t *regs = NULL;
|
||||
|
||||
if (reg_info == NULL)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if (reg_info->regs)
|
||||
{
|
||||
for (i = 0; i < reg_info->entries; i++)
|
||||
{
|
||||
regs = ®_info->regs[i];
|
||||
|
||||
if (regs->ptr)
|
||||
{
|
||||
nv_iounmap(regs->ptr, regs->size);
|
||||
}
|
||||
}
|
||||
|
||||
os_free_mem(reg_info->regs);
|
||||
}
|
||||
|
||||
os_free_mem(reg_info);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers);
|
||||
427
kernel-open/nvidia/nv-p2p.h
Normal file
427
kernel-open/nvidia/nv-p2p.h
Normal file
@@ -0,0 +1,427 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2011-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NV_P2P_H_
|
||||
#define _NV_P2P_H_
|
||||
|
||||
/*
|
||||
* NVIDIA P2P Structure Versioning
|
||||
*
|
||||
* For the nvidia_p2p_*_t structures allocated by the NVIDIA driver, it will
|
||||
* set the version field of the structure according to the definition used by
|
||||
* the NVIDIA driver. The "major" field of the version is defined as the upper
|
||||
* 16 bits, and the "minor" field of the version is defined as the lower 16
|
||||
* bits. The version field will always be the first 4 bytes of the structure,
|
||||
* and third-party drivers should check the value of this field in structures
|
||||
* allocated by the NVIDIA driver to ensure runtime compatibility.
|
||||
*
|
||||
* In general, version numbers will be incremented as follows:
|
||||
* - When a backwards-compatible change is made to the structure layout, the
|
||||
* minor version for that structure will be incremented. Third-party drivers
|
||||
* built against an older minor version will continue to work with the newer
|
||||
* minor version used by the NVIDIA driver, without recompilation.
|
||||
* - When a breaking change is made to the structure layout, the major version
|
||||
* will be incremented. Third-party drivers built against an older major
|
||||
* version require at least recompilation and potentially additional updates
|
||||
* to use the new API.
|
||||
*/
|
||||
#define NVIDIA_P2P_MAJOR_VERSION_MASK 0xffff0000
|
||||
#define NVIDIA_P2P_MINOR_VERSION_MASK 0x0000ffff
|
||||
|
||||
#define NVIDIA_P2P_MAJOR_VERSION(v) \
|
||||
(((v) & NVIDIA_P2P_MAJOR_VERSION_MASK) >> 16)
|
||||
|
||||
#define NVIDIA_P2P_MINOR_VERSION(v) \
|
||||
(((v) & NVIDIA_P2P_MINOR_VERSION_MASK))
|
||||
|
||||
#define NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) \
|
||||
(NVIDIA_P2P_MAJOR_VERSION((p)->version) == NVIDIA_P2P_MAJOR_VERSION(v))
|
||||
|
||||
#define NVIDIA_P2P_VERSION_COMPATIBLE(p, v) \
|
||||
(NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) && \
|
||||
(NVIDIA_P2P_MINOR_VERSION((p)->version) >= (NVIDIA_P2P_MINOR_VERSION(v))))
|
||||
|
||||
enum {
|
||||
NVIDIA_P2P_ARCHITECTURE_TESLA = 0,
|
||||
NVIDIA_P2P_ARCHITECTURE_FERMI,
|
||||
NVIDIA_P2P_ARCHITECTURE_CURRENT = NVIDIA_P2P_ARCHITECTURE_FERMI
|
||||
};
|
||||
|
||||
#define NVIDIA_P2P_PARAMS_VERSION 0x00010001
|
||||
|
||||
enum {
|
||||
NVIDIA_P2P_PARAMS_ADDRESS_INDEX_GPU = 0,
|
||||
NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE,
|
||||
NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX = \
|
||||
NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE
|
||||
};
|
||||
|
||||
#define NVIDIA_P2P_GPU_UUID_LEN 16
|
||||
|
||||
/*
 * Parameters for the (unsupported) nvidia_p2p_init_mapping() API.
 */
typedef
struct nvidia_p2p_params {
    uint32_t version;       /* NVIDIA_P2P_PARAMS_VERSION. */
    uint32_t architecture;  /* One of NVIDIA_P2P_ARCHITECTURE_*. */
    union nvidia_p2p_mailbox_addresses {
        struct {
            uint64_t wmb_addr;
            uint64_t wmb_data;
            uint64_t rreq_addr;
            uint64_t rcomp_addr;
            uint64_t reserved[2];
        } fermi;
    } addresses[NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX+1];
} nvidia_p2p_params_t;
|
||||
|
||||
/*
|
||||
* Capability flag for users to detect
|
||||
* driver support for persistent pages.
|
||||
*/
|
||||
extern int nvidia_p2p_cap_persistent_pages;
|
||||
#define NVIDIA_P2P_CAP_PERSISTENT_PAGES
|
||||
|
||||
/*
|
||||
* This API is not supported.
|
||||
*/
|
||||
int nvidia_p2p_init_mapping(uint64_t p2p_token,
|
||||
struct nvidia_p2p_params *params,
|
||||
void (*destroy_callback)(void *data),
|
||||
void *data);
|
||||
|
||||
/*
|
||||
* This API is not supported.
|
||||
*/
|
||||
int nvidia_p2p_destroy_mapping(uint64_t p2p_token);
|
||||
|
||||
/* Page sizes at which GPU memory may be exposed to third-party devices. */
enum nvidia_p2p_page_size_type {
    NVIDIA_P2P_PAGE_SIZE_4KB = 0,
    NVIDIA_P2P_PAGE_SIZE_64KB,
    NVIDIA_P2P_PAGE_SIZE_128KB,
    NVIDIA_P2P_PAGE_SIZE_COUNT  /* Number of valid page-size types. */
};
|
||||
|
||||
/*
 * Descriptor for one GPU page made accessible to a third-party device.
 */
typedef
struct nvidia_p2p_page {
    uint64_t physical_address;  /* Physical address of the page. */
    union nvidia_p2p_request_registers {
        /* Fermi-class request register values. */
        struct {
            uint32_t wreqmb_h;
            uint32_t rreqmb_h;
            uint32_t rreqmb_0;
            uint32_t reserved[3];
        } fermi;
    } registers;
} nvidia_p2p_page_t;
|
||||
|
||||
#define NVIDIA_P2P_PAGE_TABLE_VERSION 0x00010002
|
||||
|
||||
#define NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(p) \
|
||||
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_PAGE_TABLE_VERSION)
|
||||
|
||||
/*
 * Page table returned by nvidia_p2p_get_pages(), describing the pinned
 * GPU pages underlying a virtual address range.
 */
typedef
struct nvidia_p2p_page_table {
    uint32_t version;    /* NVIDIA_P2P_PAGE_TABLE_VERSION at allocation. */
    uint32_t page_size;  /* enum nvidia_p2p_page_size_type */
    struct nvidia_p2p_page **pages;  /* Array of per-page descriptors. */
    uint32_t entries;    /* Number of entries in pages[]. */
    uint8_t *gpu_uuid;   /* Owning GPU's UUID (NVIDIA_P2P_GPU_UUID_LEN bytes). */
} nvidia_p2p_page_table_t;
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Make the pages underlying a range of GPU virtual memory
|
||||
* accessible to a third-party device.
|
||||
*
|
||||
* This API only supports pinned, GPU-resident memory, such as that provided
|
||||
* by cudaMalloc().
|
||||
*
|
||||
* This API may sleep.
|
||||
*
|
||||
* @param[in] p2p_token
|
||||
* A token that uniquely identifies the P2P mapping.
|
||||
* @param[in] va_space
|
||||
* A GPU virtual address space qualifier.
|
||||
* @param[in] virtual_address
|
||||
* The start address in the specified virtual address space.
|
||||
* Address must be aligned to the 64KB boundary.
|
||||
* @param[in] length
|
||||
* The length of the requested P2P mapping.
|
||||
* Length must be a multiple of 64KB.
|
||||
* @param[out] page_table
|
||||
* A pointer to an array of structures with P2P PTEs.
|
||||
* @param[in] free_callback
|
||||
* A pointer to the function to be invoked when the pages
|
||||
* underlying the virtual address range are freed
|
||||
* implicitly.
|
||||
* If NULL, persistent pages will be returned.
|
||||
* This means the pages underlying the range of GPU virtual memory
|
||||
* will persist until explicitly freed by nvidia_p2p_put_pages().
|
||||
 *          Persistent GPU memory mappings are not supported on PowerPC,
 *          MIG-enabled devices and vGPU.
|
||||
|
||||
* @param[in] data
|
||||
* A non-NULL opaque pointer to private data to be passed to the
|
||||
* callback function.
|
||||
*
|
||||
* @return
|
||||
* 0 upon successful completion.
|
||||
* -EINVAL if an invalid argument was supplied.
|
||||
* -ENOTSUPP if the requested operation is not supported.
|
||||
* -ENOMEM if the driver failed to allocate memory or if
|
||||
* insufficient resources were available to complete the operation.
|
||||
* -EIO if an unknown error occurred.
|
||||
*/
|
||||
int nvidia_p2p_get_pages(uint64_t p2p_token, uint32_t va_space,
|
||||
uint64_t virtual_address,
|
||||
uint64_t length,
|
||||
struct nvidia_p2p_page_table **page_table,
|
||||
void (*free_callback)(void *data),
|
||||
void *data);
|
||||
|
||||
#define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00020003
|
||||
|
||||
#define NVIDIA_P2P_DMA_MAPPING_VERSION_COMPATIBLE(p) \
|
||||
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_DMA_MAPPING_VERSION)
|
||||
|
||||
struct pci_dev;
|
||||
|
||||
/*
 * DMA mapping returned by nvidia_p2p_dma_map_pages(), giving the bus
 * addresses a specific peer PCI device must use to reach the pages.
 */
typedef
struct nvidia_p2p_dma_mapping {
    uint32_t version;  /* NVIDIA_P2P_DMA_MAPPING_VERSION at allocation. */
    enum nvidia_p2p_page_size_type page_size_type;  /* Size of each mapped page. */
    uint32_t entries;         /* Number of entries in dma_addresses[]. */
    uint64_t *dma_addresses;  /* Bus addresses usable by the peer device. */
    void *private;            /* Opaque driver-private mapping handle. */
    struct pci_dev *pci_dev;  /* Peer device the mapping was created for. */
} nvidia_p2p_dma_mapping_t;
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Make the physical pages retrieved using nvidia_p2p_get_pages accessible to
|
||||
* a third-party device.
|
||||
*
|
||||
* @param[in] peer
|
||||
* The struct pci_dev * of the peer device that needs to DMA to/from the
|
||||
* mapping.
|
||||
* @param[in] page_table
|
||||
* The page table outlining the physical pages underlying the mapping, as
|
||||
* retrieved with nvidia_p2p_get_pages().
|
||||
* @param[out] dma_mapping
|
||||
* The DMA mapping containing the DMA addresses to use on the third-party
|
||||
* device.
|
||||
*
|
||||
* @return
|
||||
* 0 upon successful completion.
|
||||
* -EINVAL if an invalid argument was supplied.
|
||||
* -ENOTSUPP if the requested operation is not supported.
|
||||
* -EIO if an unknown error occurred.
|
||||
*/
|
||||
int nvidia_p2p_dma_map_pages(struct pci_dev *peer,
|
||||
struct nvidia_p2p_page_table *page_table,
|
||||
struct nvidia_p2p_dma_mapping **dma_mapping);
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Unmap the physical pages previously mapped to the third-party device by
|
||||
* nvidia_p2p_dma_map_pages().
|
||||
*
|
||||
* @param[in] peer
|
||||
* The struct pci_dev * of the peer device that the DMA mapping belongs to.
|
||||
* @param[in] page_table
|
||||
* The page table backing the DMA mapping to be unmapped.
|
||||
* @param[in] dma_mapping
|
||||
* The DMA mapping containing the DMA addresses used by the third-party
|
||||
* device, as retrieved with nvidia_p2p_dma_map_pages(). After this call
|
||||
* returns, neither this struct nor the addresses contained within will be
|
||||
* valid for use by the third-party device.
|
||||
*
|
||||
* @return
|
||||
* 0 upon successful completion.
|
||||
* -EINVAL if an invalid argument was supplied.
|
||||
* -EIO if an unknown error occurred.
|
||||
*/
|
||||
int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer,
|
||||
struct nvidia_p2p_page_table *page_table,
|
||||
struct nvidia_p2p_dma_mapping *dma_mapping);
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Release a set of pages previously made accessible to
|
||||
* a third-party device.
|
||||
*
|
||||
* @param[in] p2p_token
|
||||
* A token that uniquely identifies the P2P mapping.
|
||||
* @param[in] va_space
|
||||
* A GPU virtual address space qualifier.
|
||||
* @param[in] virtual_address
|
||||
* The start address in the specified virtual address space.
|
||||
* @param[in] page_table
|
||||
* A pointer to the array of structures with P2P PTEs.
|
||||
*
|
||||
* @return
|
||||
* 0 upon successful completion.
|
||||
* -EINVAL if an invalid argument was supplied.
|
||||
* -EIO if an unknown error occurred.
|
||||
*/
|
||||
int nvidia_p2p_put_pages(uint64_t p2p_token, uint32_t va_space,
|
||||
uint64_t virtual_address,
|
||||
struct nvidia_p2p_page_table *page_table);
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Free a third-party P2P page table. (This function is a no-op.)
|
||||
*
|
||||
* @param[in] page_table
|
||||
* A pointer to the array of structures with P2P PTEs.
|
||||
*
|
||||
* @return
|
||||
* 0 upon successful completion.
|
||||
* -EINVAL if an invalid argument was supplied.
|
||||
*/
|
||||
int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table);
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Free a third-party P2P DMA mapping. (This function is a no-op.)
|
||||
*
|
||||
* @param[in] dma_mapping
|
||||
* A pointer to the DMA mapping structure.
|
||||
*
|
||||
* @return
|
||||
* 0 upon successful completion.
|
||||
* -EINVAL if an invalid argument was supplied.
|
||||
*/
|
||||
int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping);
|
||||
|
||||
#define NVIDIA_P2P_RSYNC_DRIVER_VERSION 0x00010001
|
||||
|
||||
#define NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(p) \
|
||||
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_DRIVER_VERSION)
|
||||
|
||||
/*
 * Callback table supplied by an rsync driver at registration time.
 * All three callbacks are required (see nvidia_p2p_register_rsync_driver).
 */
typedef
struct nvidia_p2p_rsync_driver {
    uint32_t version;  /* NVIDIA_P2P_RSYNC_DRIVER_VERSION. */
    /* Acquire a reference to the current relaxed ordering mode. */
    int (*get_relaxed_ordering_mode)(int *mode, void *data);
    /* Release a reference taken via get_relaxed_ordering_mode. */
    void (*put_relaxed_ordering_mode)(int mode, void *data);
    /* Issue RSYNC; may be called under spinlocks, so must not sleep. */
    void (*wait_for_rsync)(struct pci_dev *gpu, void *data);
} nvidia_p2p_rsync_driver_t;
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Registers the rsync driver.
|
||||
*
|
||||
* @param[in] driver
|
||||
* A pointer to the rsync driver structure. The NVIDIA driver would use,
|
||||
*
|
||||
* get_relaxed_ordering_mode to obtain a reference to the current relaxed
|
||||
* ordering mode (treated as a boolean) from the rsync driver.
|
||||
*
|
||||
* put_relaxed_ordering_mode to release a reference to the current relaxed
|
||||
* ordering mode back to the rsync driver. The NVIDIA driver will call this
|
||||
* function once for each successful call to get_relaxed_ordering_mode, and
|
||||
* the relaxed ordering mode must not change until the last reference is
|
||||
* released.
|
||||
*
|
||||
* wait_for_rsync to call into the rsync module to issue RSYNC. This callback
|
||||
* can't sleep or re-schedule as it may arrive under spinlocks.
|
||||
* @param[in] data
|
||||
* A pointer to the rsync driver's private data.
|
||||
*
|
||||
* @Returns
|
||||
* 0 upon successful completion.
|
||||
* -EINVAL parameters are incorrect.
|
||||
* -EBUSY if a module is already registered or GPU devices are in use.
|
||||
*/
|
||||
int nvidia_p2p_register_rsync_driver(nvidia_p2p_rsync_driver_t *driver,
|
||||
void *data);
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Unregisters the rsync driver.
|
||||
*
|
||||
* @param[in] driver
|
||||
* A pointer to the rsync driver structure.
|
||||
* @param[in] data
|
||||
* A pointer to the rsync driver's private data.
|
||||
*/
|
||||
void nvidia_p2p_unregister_rsync_driver(nvidia_p2p_rsync_driver_t *driver,
|
||||
void *data);
|
||||
|
||||
#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION 0x00020001
|
||||
|
||||
#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION_COMPATIBLE(p) \
|
||||
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_REG_INFO_VERSION)
|
||||
|
||||
/* One rsync (GEN-ID) register region, as enumerated by
 * nvidia_p2p_get_rsync_registers(). */
typedef struct nvidia_p2p_rsync_reg {
    void *ptr;               /* Kernel mapping of the register region. */
    size_t size;             /* Size of the mapped region. */
    struct pci_dev *ibmnpu;  /* Associated NPU device. */
    struct pci_dev *gpu;     /* Associated GPU device. */
    uint32_t cluster_id;     /* Currently always 0 (cluster mode only). */
    uint32_t socket_id;      /* Socket (chip) identifier. */
} nvidia_p2p_rsync_reg_t;

/* Container for the enumerated register regions; free with
 * nvidia_p2p_put_rsync_registers(). */
typedef struct nvidia_p2p_rsync_reg_info {
    uint32_t version;              /* NVIDIA_P2P_RSYNC_REG_INFO_VERSION. */
    nvidia_p2p_rsync_reg_t *regs;  /* Array of register descriptors. */
    size_t entries;                /* Number of entries in regs[]. */
} nvidia_p2p_rsync_reg_info_t;
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Gets rsync (GEN-ID) register information associated with the supported
|
||||
* NPUs.
|
||||
*
|
||||
* The caller would use the returned information {GPU device, NPU device,
|
||||
* socket-id, cluster-id} to pick the optimal generation registers to issue
|
||||
* RSYNC (NVLink HW flush).
|
||||
*
|
||||
* The interface allocates structures to return the information, hence
|
||||
* nvidia_p2p_put_rsync_registers() must be called to free the structures.
|
||||
*
|
||||
* Note, cluster-id is hardcoded to zero as early system configurations would
|
||||
* only support cluster mode i.e. all devices would share the same cluster-id
|
||||
* (0). In the future, appropriate kernel support would be needed to query
|
||||
* cluster-ids.
|
||||
*
|
||||
* @param[out] reg_info
|
||||
* A pointer to the rsync reg info structure.
|
||||
*
|
||||
* @Returns
|
||||
* 0 Upon successful completion. Otherwise, returns negative value.
|
||||
*/
|
||||
int nvidia_p2p_get_rsync_registers(nvidia_p2p_rsync_reg_info_t **reg_info);
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Frees the structures allocated by nvidia_p2p_get_rsync_registers().
|
||||
*
|
||||
* @param[in] reg_info
|
||||
* A pointer to the rsync reg info structure.
|
||||
*/
|
||||
void nvidia_p2p_put_rsync_registers(nvidia_p2p_rsync_reg_info_t *reg_info);
|
||||
|
||||
#endif /* _NV_P2P_H_ */
|
||||
478
kernel-open/nvidia/nv-pat.c
Normal file
478
kernel-open/nvidia/nv-pat.c
Normal file
@@ -0,0 +1,478 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 1999-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
#include "nv-reg.h"
|
||||
#include "nv-pat.h"
|
||||
|
||||
int nv_pat_mode = NV_PAT_MODE_DISABLED;
|
||||
|
||||
#if defined(NV_ENABLE_PAT_SUPPORT)
|
||||
/*
|
||||
* Private PAT support for use by the NVIDIA driver. This is used on
|
||||
* kernels that do not modify the PAT to include a write-combining
|
||||
* entry.
|
||||
*
|
||||
* On kernels that have CONFIG_X86_PAT, the NVIDIA driver still checks that the
|
||||
* WC entry is as expected before using PAT.
|
||||
*/
|
||||
|
||||
#if defined(CONFIG_X86_PAT)
|
||||
#define NV_ENABLE_BUILTIN_PAT_SUPPORT 0
|
||||
#else
|
||||
#define NV_ENABLE_BUILTIN_PAT_SUPPORT 1
|
||||
#endif
|
||||
|
||||
|
||||
#define NV_READ_PAT_ENTRIES(pat1, pat2) rdmsr(0x277, (pat1), (pat2))
|
||||
#define NV_WRITE_PAT_ENTRIES(pat1, pat2) wrmsr(0x277, (pat1), (pat2))
|
||||
#define NV_PAT_ENTRY(pat, index) \
|
||||
(((pat) & (0xff << ((index)*8))) >> ((index)*8))
|
||||
|
||||
#if NV_ENABLE_BUILTIN_PAT_SUPPORT
|
||||
|
||||
static unsigned long orig_pat1, orig_pat2;
|
||||
|
||||
/*
 * Disable CPU caches in preparation for rewriting the PAT MSR, saving
 * the current CR4 into *cr4 for nv_enable_caches() to restore.
 * Callers run this with interrupts disabled (NV_CLI).
 */
static inline void nv_disable_caches(unsigned long *cr4)
{
    unsigned long cr0 = read_cr0();
    /* Set CR0.CD (bit 30) and clear CR0.NW (bit 29) to disable caching. */
    write_cr0(((cr0 & (0xdfffffff)) | 0x40000000));
    wbinvd();
    *cr4 = NV_READ_CR4();
    /* Clear CR4.PGE (bit 7) so the TLB flush also drops global pages. */
    if (*cr4 & 0x80) NV_WRITE_CR4(*cr4 & ~0x80);
    __flush_tlb();
}
|
||||
|
||||
/*
 * Re-enable CPU caches after a PAT MSR update, restoring the CR4 value
 * saved by nv_disable_caches().
 */
static inline void nv_enable_caches(unsigned long cr4)
{
    unsigned long cr0 = read_cr0();
    wbinvd();
    __flush_tlb();
    /* Clear CR0.CD (bit 30) and CR0.NW (bit 29) to re-enable caching. */
    write_cr0((cr0 & 0x9fffffff));
    /* Restore CR4.PGE if it was set before. */
    if (cr4 & 0x80) NV_WRITE_CR4(cr4);
}
|
||||
|
||||
/*
 * Program PAT entry 1 on the calling CPU to write-combining (0x01),
 * with interrupts off and caches disabled around the MSR write.
 *
 * When CPU hotplug support is enabled, 'info' may carry a target CPU
 * number (passed by value, cast through NvUPtr); a non-zero target that
 * does not match the current CPU causes an early return.
 */
static void nv_setup_pat_entries(void *info)
{
    unsigned long pat1, pat2, cr4;
    unsigned long eflags;

#if defined(NV_ENABLE_HOTPLUG_CPU)
    int cpu = (NvUPtr)info;
    if ((cpu != 0) && (cpu != (int)smp_processor_id()))
        return;
#endif

    NV_SAVE_FLAGS(eflags);
    NV_CLI();
    nv_disable_caches(&cr4);

    NV_READ_PAT_ENTRIES(pat1, pat2);

    /* Replace PAT entry 1 (byte 1 of the low MSR word) with WC (0x01). */
    pat1 &= 0xffff00ff;
    pat1 |= 0x00000100;

    NV_WRITE_PAT_ENTRIES(pat1, pat2);

    nv_enable_caches(cr4);
    NV_RESTORE_FLAGS(eflags);
}
|
||||
|
||||
/*
 * Restore the calling CPU's PAT MSR to the values saved in
 * orig_pat1/orig_pat2 by nv_enable_builtin_pat_support().
 *
 * When CPU hotplug support is enabled, 'info' may carry a target CPU
 * number (passed by value, cast through NvUPtr); a non-zero target that
 * does not match the current CPU causes an early return.
 */
static void nv_restore_pat_entries(void *info)
{
    unsigned long cr4;
    unsigned long eflags;

#if defined(NV_ENABLE_HOTPLUG_CPU)
    int cpu = (NvUPtr)info;
    if ((cpu != 0) && (cpu != (int)smp_processor_id()))
        return;
#endif

    NV_SAVE_FLAGS(eflags);
    NV_CLI();
    nv_disable_caches(&cr4);

    NV_WRITE_PAT_ENTRIES(orig_pat1, orig_pat2);

    nv_enable_caches(cr4);
    NV_RESTORE_FLAGS(eflags);
}
|
||||
|
||||
/*
|
||||
* NOTE 1:
|
||||
* Functions register_cpu_notifier(), unregister_cpu_notifier(),
|
||||
* macros register_hotcpu_notifier, register_hotcpu_notifier,
|
||||
* and CPU states CPU_DOWN_FAILED, CPU_DOWN_PREPARE
|
||||
* were removed by the following commit:
|
||||
* 2016 Dec 25: b272f732f888d4cf43c943a40c9aaa836f9b7431
|
||||
*
|
||||
* NV_REGISTER_CPU_NOTIFIER_PRESENT is true when
|
||||
* register_cpu_notifier() is present.
|
||||
*
|
||||
* The functions cpuhp_setup_state() and cpuhp_remove_state() should be
|
||||
* used as an alternative to register_cpu_notifier() and
|
||||
* unregister_cpu_notifier() functions. The following
|
||||
* commit introduced these functions as well as the enum cpuhp_state.
|
||||
* 2016 Feb 26: 5b7aa87e0482be768486e0c2277aa4122487eb9d
|
||||
*
|
||||
* NV_CPUHP_CPUHP_STATE_PRESENT is true when cpuhp_setup_state() is present.
|
||||
*
|
||||
* For kernels where both cpuhp_setup_state() and register_cpu_notifier()
|
||||
* are present, we still use register_cpu_notifier().
|
||||
*/
|
||||
|
||||
/*
 * CPU-hotplug teardown hook: restore the original PAT entries on the
 * CPU going offline, cross-calling into it when necessary.
 */
static int
nvidia_cpu_teardown(unsigned int cpu)
{
#if defined(NV_ENABLE_HOTPLUG_CPU)
    unsigned int this_cpu = get_cpu();

    if (this_cpu == cpu)
        nv_restore_pat_entries(NULL);
    else
        /*
         * nv_restore_pat_entries() recovers the target CPU number by
         * casting its argument with (NvUPtr)info, so the CPU number
         * itself must be passed as the pointer value. Passing &cpu (as
         * before) made the callee compare a stack address against
         * smp_processor_id(), which never matches, so the remote CPU's
         * PAT was silently left untouched.
         */
        smp_call_function(nv_restore_pat_entries, (void *)(NvUPtr)cpu, 1);

    put_cpu();
#endif
    return 0;
}
|
||||
|
||||
/*
 * CPU-hotplug online hook: program the NVIDIA write-combining PAT entry
 * on the CPU coming online, cross-calling into it when necessary.
 */
static int
nvidia_cpu_online(unsigned int cpu)
{
#if defined(NV_ENABLE_HOTPLUG_CPU)
    unsigned int this_cpu = get_cpu();

    if (this_cpu == cpu)
        nv_setup_pat_entries(NULL);
    else
        /*
         * nv_setup_pat_entries() recovers the target CPU number by
         * casting its argument with (NvUPtr)info, so the CPU number
         * itself must be passed as the pointer value. Passing &cpu (as
         * before) made the callee compare a stack address against
         * smp_processor_id(), which never matches, so the remote CPU's
         * PAT was silently left unprogrammed.
         */
        smp_call_function(nv_setup_pat_entries, (void *)(NvUPtr)cpu, 1);

    put_cpu();
#endif
    return 0;
}
|
||||
|
||||
static int nv_enable_builtin_pat_support(void)
|
||||
{
|
||||
unsigned long pat1, pat2;
|
||||
|
||||
NV_READ_PAT_ENTRIES(orig_pat1, orig_pat2);
|
||||
nv_printf(NV_DBG_SETUP, "saved orig pats as 0x%lx 0x%lx\n", orig_pat1, orig_pat2);
|
||||
|
||||
on_each_cpu(nv_setup_pat_entries, NULL, 1);
|
||||
|
||||
NV_READ_PAT_ENTRIES(pat1, pat2);
|
||||
nv_printf(NV_DBG_SETUP, "changed pats to 0x%lx 0x%lx\n", pat1, pat2);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void nv_disable_builtin_pat_support(void)
|
||||
{
|
||||
unsigned long pat1, pat2;
|
||||
|
||||
on_each_cpu(nv_restore_pat_entries, NULL, 1);
|
||||
|
||||
nv_pat_mode = NV_PAT_MODE_DISABLED;
|
||||
|
||||
NV_READ_PAT_ENTRIES(pat1, pat2);
|
||||
nv_printf(NV_DBG_SETUP, "restored orig pats as 0x%lx 0x%lx\n", pat1, pat2);
|
||||
}
|
||||
|
||||
/*
 * Legacy (pre-cpuhp) CPU hotplug notifier callback.
 *
 * Dispatches to the online/teardown helpers so the driver's PAT layout is
 * programmed on a CPU coming online and restored on a CPU going down.
 * 'hcpu' encodes the CPU number as a pointer value.
 *
 * Always returns NOTIFY_OK; the callbacks themselves cannot fail.
 */
static int
nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    /* CPU_DOWN_FAILED was added by the following commit
     * 2004 Oct 18: 71da3667be80d30121df3972caa0bf5684228379
     *
     * CPU_DOWN_PREPARE was added by the following commit
     * 2004 Oct 18: d13d28de21d913aacd3c91e76e307fa2eb7835d8
     *
     * We use one ifdef for both macros since they were added on the same day.
     */
#if defined(CPU_DOWN_FAILED)
    switch (action)
    {
        /* DOWN_FAILED: a planned offline was aborted, so the PAT must be
         * set up again, just as for a newly-onlined CPU. */
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
            nvidia_cpu_online((NvUPtr)hcpu);
            break;
        case CPU_DOWN_PREPARE:
            nvidia_cpu_teardown((NvUPtr)hcpu);
            break;
    }
#endif
    return NOTIFY_OK;
}
|
||||
|
||||
/*
|
||||
* See NOTE 1.
|
||||
* In order to avoid warnings for unused variable when compiling against
|
||||
* kernel versions which include changes of commit id
|
||||
* b272f732f888d4cf43c943a40c9aaa836f9b7431, we have to protect declaration
|
||||
* of nv_hotcpu_nfb with #if.
|
||||
*
|
||||
* NV_REGISTER_CPU_NOTIFIER_PRESENT is checked before
|
||||
* NV_CPUHP_SETUP_STATE_PRESENT to avoid compilation warnings for unused
|
||||
* variable nvidia_pat_online for kernels where both
|
||||
* NV_REGISTER_CPU_NOTIFIER_PRESENT and NV_CPUHP_SETUP_STATE_PRESENT
|
||||
* are true.
|
||||
*/
|
||||
#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
|
||||
static struct notifier_block nv_hotcpu_nfb = {
|
||||
.notifier_call = nvidia_cpu_callback,
|
||||
.priority = 0
|
||||
};
|
||||
#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
|
||||
static enum cpuhp_state nvidia_pat_online;
|
||||
#endif
|
||||
|
||||
/*
 * Register for CPU hotplug notifications so the driver's builtin PAT
 * programming is applied to CPUs as they come online and undone as they
 * go offline.  Uses the legacy notifier API when present, otherwise the
 * cpuhp state machine.
 *
 * Returns 0 on success.  On failure, builtin PAT support is disabled
 * again and -EIO is returned.
 */
static int
nvidia_register_cpu_hotplug_notifier(void)
{
    int ret;
    /* See NOTE 1 */
#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
    /* register_hotcpu_notifier() returns 0 on success or -ENOENT on failure */
    ret = register_hotcpu_notifier(&nv_hotcpu_nfb);
#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
    /*
     * cpuhp_setup_state() returns positive number on success when state is
     * CPUHP_AP_ONLINE_DYN. On failure, it returns a negative number.
     */
    ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                            "nvidia/pat:online",
                            nvidia_cpu_online,
                            nvidia_cpu_teardown);
    if (ret < 0)
    {
        /*
         * If cpuhp_setup_state() fails, the cpuhp_remove_state()
         * should never be called. If it gets called, we might remove
         * some other state. Hence, explicitly set
         * nvidia_pat_online to zero. This will trigger a BUG()
         * in cpuhp_remove_state().
         */
        nvidia_pat_online = 0;
    }
    else
    {
        nvidia_pat_online = ret;
    }
#else

    /*
     * This function should be a no-op for kernels which
     * - do not have CONFIG_HOTPLUG_CPU enabled,
     * - do not have PAT support,
     * - do not have the cpuhp_setup_state() function.
     *
     * On such kernels, returning an error here would result in module init
     * failure. Hence, return 0 here.
     */
    if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
    {
        ret = 0;
    }
    else
    {
        ret = -EIO;
    }
#endif

    if (ret < 0)
    {
        /* Roll back the PAT changes made before registration was tried. */
        nv_disable_pat_support();
        nv_printf(NV_DBG_ERRORS,
            "NVRM: CPU hotplug notifier registration failed!\n");
        return -EIO;
    }
    return 0;
}
|
||||
|
||||
/*
 * Undo nvidia_register_cpu_hotplug_notifier().  A no-op when neither
 * hotplug API is available.
 */
static void
nvidia_unregister_cpu_hotplug_notifier(void)
{
    /* See NOTE 1 */
#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
    unregister_hotcpu_notifier(&nv_hotcpu_nfb);
#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
    /* nvidia_pat_online is deliberately 0 if setup failed; passing 0 here
     * triggers a BUG() in cpuhp_remove_state() — see registration path. */
    cpuhp_remove_state(nvidia_pat_online);
#endif
    return;
}
|
||||
|
||||
|
||||
#else /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
|
||||
|
||||
/* Stub: builtin PAT support compiled out — enabling always fails (0). */
static int nv_enable_builtin_pat_support(void)
{
    return 0;
}
/* Stub: nothing to undo when builtin PAT support is compiled out. */
static void nv_disable_builtin_pat_support(void)
{
}
/* Stub: no hotplug notifier without builtin PAT support; report -EIO.
 * Callers only invoke this in NV_PAT_MODE_BUILTIN, which cannot be
 * selected in this configuration. */
static int nvidia_register_cpu_hotplug_notifier(void)
{
    return -EIO;
}
/* Stub: nothing was registered, so nothing to unregister. */
static void nvidia_unregister_cpu_hotplug_notifier(void)
{
}
|
||||
|
||||
#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
|
||||
|
||||
/*
 * Decide how the driver will obtain a write-combining (WC) PAT entry.
 *
 * Returns one of:
 *   NV_PAT_MODE_KERNEL   - the kernel already placed WC (0x01) in PAT
 *                          entry 1; inherit the kernel's layout.
 *   NV_PAT_MODE_BUILTIN  - no WC entry exists anywhere; the driver may
 *                          program one itself (only when builtin support
 *                          is compiled in).
 *   NV_PAT_MODE_DISABLED - PAT unusable: the CPU lacks PAT, or WC sits at
 *                          an entry index other than 1, which this driver
 *                          does not support.
 */
static int nv_determine_pat_mode(void)
{
    unsigned int pat1, pat2, i;
    NvU8 PAT_WC_index;

    if (!test_bit(X86_FEATURE_PAT,
            (volatile unsigned long *)&boot_cpu_data.x86_capability))
    {
        /*
         * The kernel's PAT feature bit may be cleared even though the
         * hardware has PAT; accept Intel Family-6 parts (model < 15) that
         * advertise PAT via CPUID leaf 1, EDX bit 16.  Everything else is
         * treated as having no PAT.
         */
        if ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) ||
            (boot_cpu_data.cpuid_level < 1) ||
            ((cpuid_edx(1) & (1 << 16)) == 0) ||
            (boot_cpu_data.x86 != 6) || (boot_cpu_data.x86_model >= 15))
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: CPU does not support the PAT.\n");
            return NV_PAT_MODE_DISABLED;
        }
    }

    /* Scan all eight PAT entries for one already set to WC (0x01). */
    NV_READ_PAT_ENTRIES(pat1, pat2);
    PAT_WC_index = 0xf;    /* sentinel: no WC entry found */

    for (i = 0; i < 4; i++)
    {
        if (NV_PAT_ENTRY(pat1, i) == 0x01)
        {
            PAT_WC_index = i;
            break;
        }

        if (NV_PAT_ENTRY(pat2, i) == 0x01)
        {
            PAT_WC_index = (i + 4);
            break;
        }
    }

    if (PAT_WC_index == 1)
    {
        return NV_PAT_MODE_KERNEL;
    }
    else if (PAT_WC_index != 0xf)
    {
        /* WC exists but not at index 1; only entry 1 is supported here. */
        nv_printf(NV_DBG_ERRORS,
            "NVRM: PAT configuration unsupported.\n");
        return NV_PAT_MODE_DISABLED;
    }
    else
    {
#if NV_ENABLE_BUILTIN_PAT_SUPPORT
        return NV_PAT_MODE_BUILTIN;
#else
        return NV_PAT_MODE_DISABLED;
#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
    }
}
|
||||
|
||||
|
||||
int nv_enable_pat_support(void)
|
||||
{
|
||||
if (nv_pat_mode != NV_PAT_MODE_DISABLED)
|
||||
return 1;
|
||||
|
||||
nv_pat_mode = nv_determine_pat_mode();
|
||||
|
||||
switch (nv_pat_mode)
|
||||
{
|
||||
case NV_PAT_MODE_DISABLED:
|
||||
/* avoid the PAT if unavailable/unusable */
|
||||
return 0;
|
||||
case NV_PAT_MODE_KERNEL:
|
||||
/* inherit the kernel's PAT layout */
|
||||
return 1;
|
||||
case NV_PAT_MODE_BUILTIN:
|
||||
/* use builtin code to modify the PAT layout */
|
||||
break;
|
||||
}
|
||||
|
||||
return nv_enable_builtin_pat_support();
|
||||
}
|
||||
|
||||
void nv_disable_pat_support(void)
|
||||
{
|
||||
if (nv_pat_mode != NV_PAT_MODE_BUILTIN)
|
||||
return;
|
||||
|
||||
nv_disable_builtin_pat_support();
|
||||
}
|
||||
|
||||
/*
 * Module-init-time PAT setup.
 *
 * Reads the "UsePageAttributeTable" registry key: an explicit 0 disables
 * PAT use entirely; ~0 (the default) means "driver decides".  Unless
 * disabled, PAT support is enabled, and when the builtin mode was chosen
 * a CPU hotplug notifier is registered so hot-added CPUs get the modified
 * PAT as well.
 *
 * Returns 0 on success, or the (negative) notifier registration error.
 */
int nv_init_pat_support(nvidia_stack_t *sp)
{
    NV_STATUS status;
    NvU32 data;
    int disable_pat = 0;
    int ret = 0;

    status = rm_read_registry_dword(sp, NULL,
            NV_USE_PAGE_ATTRIBUTE_TABLE, &data);
    if ((status == NV_OK) && ((int)data != ~0))
    {
        /* The regkey was explicitly set; 0 means "disable PAT". */
        disable_pat = (data == 0);
    }

    if (!disable_pat)
    {
        nv_enable_pat_support();
        if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
        {
            /* Keep hot-plugged CPUs' PAT consistent with the boot CPUs. */
            ret = nvidia_register_cpu_hotplug_notifier();
            return ret;
        }
    }
    else
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: builtin PAT support disabled.\n");
    }

    return 0;
}
|
||||
|
||||
void nv_teardown_pat_support(void)
|
||||
{
|
||||
if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
|
||||
{
|
||||
nv_disable_pat_support();
|
||||
nvidia_unregister_cpu_hotplug_notifier();
|
||||
}
|
||||
}
|
||||
#endif /* defined(NV_ENABLE_PAT_SUPPORT) */
|
||||
59
kernel-open/nvidia/nv-pat.h
Normal file
59
kernel-open/nvidia/nv-pat.h
Normal file
@@ -0,0 +1,59 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
|
||||
#ifndef _NV_PAT_H_
|
||||
#define _NV_PAT_H_
|
||||
|
||||
#include "nv-linux.h"
|
||||
|
||||
|
||||
#if defined(NV_ENABLE_PAT_SUPPORT)
|
||||
extern int nv_init_pat_support(nvidia_stack_t *sp);
|
||||
extern void nv_teardown_pat_support(void);
|
||||
extern int nv_enable_pat_support(void);
|
||||
extern void nv_disable_pat_support(void);
|
||||
#else
|
||||
/* Stub: PAT support compiled out; report success so module init proceeds. */
static inline int nv_init_pat_support(nvidia_stack_t *sp)
{
    (void)sp;
    return 0;
}

/* Stub: nothing to tear down when PAT support is compiled out. */
static inline void nv_teardown_pat_support(void)
{
    return;
}

/* Stub: report success (1) even without PAT support so callers proceed;
 * NOTE(review): presumably mappings then use a non-WC memory type —
 * confirm against the mapping code. */
static inline int nv_enable_pat_support(void)
{
    return 1;
}

/* Stub: nothing to disable when PAT support is compiled out. */
static inline void nv_disable_pat_support(void)
{
    return;
}
|
||||
#endif
|
||||
|
||||
#endif /* _NV_PAT_H_ */
|
||||
79
kernel-open/nvidia/nv-pci-table.c
Normal file
79
kernel-open/nvidia/nv-pci-table.c
Normal file
@@ -0,0 +1,79 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include "nv-pci-table.h"
|
||||
|
||||
/* Devices supported by RM: any NVIDIA device whose PCI class is
 * VGA-compatible display or 3D controller (vendor match only; device,
 * subvendor and subdevice are wildcards). */
struct pci_device_id nv_pci_table[] = {
    {
        /* NVIDIA VGA-compatible display controllers */
        .vendor      = PCI_VENDOR_ID_NVIDIA,
        .device      = PCI_ANY_ID,
        .subvendor   = PCI_ANY_ID,
        .subdevice   = PCI_ANY_ID,
        .class       = (PCI_CLASS_DISPLAY_VGA << 8),
        .class_mask  = ~0
    },
    {
        /* NVIDIA 3D controllers (e.g. headless/compute-only GPUs) */
        .vendor      = PCI_VENDOR_ID_NVIDIA,
        .device      = PCI_ANY_ID,
        .subvendor   = PCI_ANY_ID,
        .subdevice   = PCI_ANY_ID,
        .class       = (PCI_CLASS_DISPLAY_3D << 8),
        .class_mask  = ~0
    },
    { }  /* terminating all-zero entry */
};
|
||||
|
||||
/* Devices supported by all drivers in nvidia.ko: the RM classes above
 * plus "other bridge" devices.  This is the table exposed to module
 * autoloading via MODULE_DEVICE_TABLE below. */
struct pci_device_id nv_module_device_table[] = {
    {
        /* NVIDIA VGA-compatible display controllers */
        .vendor      = PCI_VENDOR_ID_NVIDIA,
        .device      = PCI_ANY_ID,
        .subvendor   = PCI_ANY_ID,
        .subdevice   = PCI_ANY_ID,
        .class       = (PCI_CLASS_DISPLAY_VGA << 8),
        .class_mask  = ~0
    },
    {
        /* NVIDIA 3D controllers */
        .vendor      = PCI_VENDOR_ID_NVIDIA,
        .device      = PCI_ANY_ID,
        .subvendor   = PCI_ANY_ID,
        .subdevice   = PCI_ANY_ID,
        .class       = (PCI_CLASS_DISPLAY_3D << 8),
        .class_mask  = ~0
    },
    {
        /* NVIDIA devices with "other bridge" class code */
        .vendor      = PCI_VENDOR_ID_NVIDIA,
        .device      = PCI_ANY_ID,
        .subvendor   = PCI_ANY_ID,
        .subdevice   = PCI_ANY_ID,
        .class       = (PCI_CLASS_BRIDGE_OTHER << 8),
        .class_mask  = ~0
    },
    { }  /* terminating all-zero entry */
};

MODULE_DEVICE_TABLE(pci, nv_module_device_table);
|
||||
31
kernel-open/nvidia/nv-pci-table.h
Normal file
31
kernel-open/nvidia/nv-pci-table.h
Normal file
@@ -0,0 +1,31 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NV_PCI_TABLE_H_
|
||||
#define _NV_PCI_TABLE_H_
|
||||
|
||||
#include <linux/pci.h>
|
||||
|
||||
extern struct pci_device_id nv_pci_table[];
|
||||
|
||||
#endif /* _NV_PCI_TABLE_H_ */
|
||||
1092
kernel-open/nvidia/nv-pci.c
Normal file
1092
kernel-open/nvidia/nv-pci.c
Normal file
File diff suppressed because it is too large
Load Diff
47
kernel-open/nvidia/nv-procfs-utils.c
Normal file
47
kernel-open/nvidia/nv-procfs-utils.c
Normal file
@@ -0,0 +1,47 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#if defined(CONFIG_PROC_FS)
|
||||
|
||||
#include "nv-procfs-utils.h"
|
||||
|
||||
void
|
||||
nv_procfs_unregister_all(struct proc_dir_entry *entry, struct proc_dir_entry *delimiter)
|
||||
{
|
||||
#if defined(NV_PROC_REMOVE_PRESENT)
|
||||
proc_remove(entry);
|
||||
#else
|
||||
while (entry)
|
||||
{
|
||||
struct proc_dir_entry *next = entry->next;
|
||||
if (entry->subdir)
|
||||
nv_procfs_unregister_all(entry->subdir, delimiter);
|
||||
remove_proc_entry(entry->name, entry->parent);
|
||||
if (entry == delimiter)
|
||||
break;
|
||||
entry = next;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
1477
kernel-open/nvidia/nv-procfs.c
Normal file
1477
kernel-open/nvidia/nv-procfs.c
Normal file
File diff suppressed because it is too large
Load Diff
930
kernel-open/nvidia/nv-reg.h
Normal file
930
kernel-open/nvidia/nv-reg.h
Normal file
@@ -0,0 +1,930 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _RM_REG_H_
|
||||
#define _RM_REG_H_
|
||||
|
||||
#include "nvtypes.h"
|
||||
|
||||
/*
|
||||
* use NV_REG_STRING to stringify a registry key when using that registry key
|
||||
*/
|
||||
|
||||
#define __NV_REG_STRING(regkey) #regkey
|
||||
#define NV_REG_STRING(regkey) __NV_REG_STRING(regkey)
|
||||
|
||||
/*
|
||||
* use NV_DEFINE_REG_ENTRY and NV_DEFINE_PARAMS_TABLE_ENTRY to simplify definition
|
||||
* of registry keys in the kernel module source code.
|
||||
*/
|
||||
|
||||
#define __NV_REG_VAR(regkey) NVreg_##regkey
|
||||
|
||||
#if defined(NV_MODULE_PARAMETER)
|
||||
#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
|
||||
static NvU32 __NV_REG_VAR(regkey) = (default_value); \
|
||||
NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
|
||||
#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
|
||||
NvU32 __NV_REG_VAR(regkey) = (default_value); \
|
||||
NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
|
||||
#else
|
||||
#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
|
||||
static NvU32 __NV_REG_VAR(regkey) = (default_value)
|
||||
#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
|
||||
NvU32 __NV_REG_VAR(regkey) = (default_value)
|
||||
#endif
|
||||
|
||||
#if defined(NV_MODULE_STRING_PARAMETER)
|
||||
#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
|
||||
char *__NV_REG_VAR(regkey) = (default_value); \
|
||||
NV_MODULE_STRING_PARAMETER(__NV_REG_VAR(regkey))
|
||||
#else
|
||||
#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
|
||||
char *__NV_REG_VAR(regkey) = (default_value)
|
||||
#endif
|
||||
|
||||
#define NV_DEFINE_PARAMS_TABLE_ENTRY(regkey) \
|
||||
{ NV_REG_STRING(regkey), &__NV_REG_VAR(regkey) }
|
||||
|
||||
/*
|
||||
 * Like NV_DEFINE_PARAMS_TABLE_ENTRY, but allows a mismatch between the name of
|
||||
* the regkey and the name of the module parameter. When using this macro, the
|
||||
* name of the parameter is passed to the extra "parameter" argument, and it is
|
||||
* this name that must be used in the NV_DEFINE_REG_ENTRY() macro.
|
||||
*/
|
||||
|
||||
#define NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(regkey, parameter) \
|
||||
{ NV_REG_STRING(regkey), &__NV_REG_VAR(parameter)}
|
||||
|
||||
/*
|
||||
*----------------- registry key definitions--------------------------
|
||||
*/
|
||||
|
||||
/*
|
||||
* Option: ModifyDeviceFiles
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When this option is enabled, the NVIDIA driver will verify the validity
|
||||
* of the NVIDIA device files in /dev and attempt to dynamically modify
|
||||
* and/or (re-)create them, if necessary. If you don't wish for the NVIDIA
|
||||
* driver to touch the device files, you can use this registry key.
|
||||
*
|
||||
* This module parameter is only honored by the NVIDIA GPU driver and NVIDIA
|
||||
* capability driver. Furthermore, the NVIDIA capability driver provides
|
||||
* modifiable /proc file entry (DeviceFileModify=0/1) to alter the behavior of
|
||||
* this module parameter per device file.
|
||||
*
|
||||
* Possible Values:
|
||||
* 0 = disable dynamic device file management
|
||||
* 1 = enable dynamic device file management (default)
|
||||
*/
|
||||
|
||||
#define __NV_MODIFY_DEVICE_FILES ModifyDeviceFiles
|
||||
#define NV_REG_MODIFY_DEVICE_FILES NV_REG_STRING(__NV_MODIFY_DEVICE_FILES)
|
||||
|
||||
/*
|
||||
* Option: DeviceFileUID
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* This registry key specifies the UID assigned to the NVIDIA device files
|
||||
* created and/or modified by the NVIDIA driver when dynamic device file
|
||||
* management is enabled.
|
||||
*
|
||||
* This module parameter is only honored by the NVIDIA GPU driver.
|
||||
*
|
||||
* The default UID is 0 ('root').
|
||||
*/
|
||||
|
||||
#define __NV_DEVICE_FILE_UID DeviceFileUID
|
||||
#define NV_REG_DEVICE_FILE_UID NV_REG_STRING(__NV_DEVICE_FILE_UID)
|
||||
|
||||
/*
|
||||
* Option: DeviceFileGID
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* This registry key specifies the GID assigned to the NVIDIA device files
|
||||
* created and/or modified by the NVIDIA driver when dynamic device file
|
||||
* management is enabled.
|
||||
*
|
||||
* This module parameter is only honored by the NVIDIA GPU driver.
|
||||
*
|
||||
* The default GID is 0 ('root').
|
||||
*/
|
||||
|
||||
#define __NV_DEVICE_FILE_GID DeviceFileGID
|
||||
#define NV_REG_DEVICE_FILE_GID NV_REG_STRING(__NV_DEVICE_FILE_GID)
|
||||
|
||||
/*
|
||||
* Option: DeviceFileMode
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* This registry key specifies the device file mode assigned to the NVIDIA
|
||||
* device files created and/or modified by the NVIDIA driver when dynamic
|
||||
* device file management is enabled.
|
||||
*
|
||||
* This module parameter is only honored by the NVIDIA GPU driver.
|
||||
*
|
||||
* The default mode is 0666 (octal, rw-rw-rw-).
|
||||
*/
|
||||
|
||||
#define __NV_DEVICE_FILE_MODE DeviceFileMode
|
||||
#define NV_REG_DEVICE_FILE_MODE NV_REG_STRING(__NV_DEVICE_FILE_MODE)
|
||||
|
||||
/*
|
||||
* Option: ResmanDebugLevel
|
||||
*
|
||||
* Default value: ~0
|
||||
*/
|
||||
|
||||
#define __NV_RESMAN_DEBUG_LEVEL ResmanDebugLevel
|
||||
#define NV_REG_RESMAN_DEBUG_LEVEL NV_REG_STRING(__NV_RESMAN_DEBUG_LEVEL)
|
||||
|
||||
/*
|
||||
* Option: RmLogonRC
|
||||
*
|
||||
* Default value: 1
|
||||
*/
|
||||
|
||||
#define __NV_RM_LOGON_RC RmLogonRC
|
||||
#define NV_REG_RM_LOGON_RC NV_REG_STRING(__NV_RM_LOGON_RC)
|
||||
|
||||
/*
|
||||
* Option: InitializeSystemMemoryAllocations
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* The NVIDIA Linux driver normally clears system memory it allocates
|
||||
* for use with GPUs or within the driver stack. This is to ensure
|
||||
* that potentially sensitive data is not rendered accessible by
|
||||
* arbitrary user applications.
|
||||
*
|
||||
* Owners of single-user systems or similar trusted configurations may
|
||||
* choose to disable the aforementioned clears using this option and
|
||||
* potentially improve performance.
|
||||
*
|
||||
* Possible values:
|
||||
*
|
||||
* 1 = zero out system memory allocations (default)
|
||||
* 0 = do not perform memory clears
|
||||
*/
|
||||
|
||||
#define __NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
|
||||
InitializeSystemMemoryAllocations
|
||||
#define NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
|
||||
NV_REG_STRING(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS)
|
||||
|
||||
/*
|
||||
* Option: RegistryDwords
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* This option accepts a semicolon-separated list of key=value pairs. Each
|
||||
* key name is checked against the table of static options; if a match is
|
||||
* found, the static option value is overridden, but invalid options remain
|
||||
* invalid. Pairs that do not match an entry in the static option table
|
||||
* are passed on to the RM directly.
|
||||
*
|
||||
* Format:
|
||||
*
|
||||
* NVreg_RegistryDwords="<key=value>;<key=value>;..."
|
||||
*/
|
||||
|
||||
#define __NV_REGISTRY_DWORDS RegistryDwords
|
||||
#define NV_REG_REGISTRY_DWORDS NV_REG_STRING(__NV_REGISTRY_DWORDS)
|
||||
|
||||
/*
|
||||
* Option: RegistryDwordsPerDevice
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
 * This option allows specifying registry keys per GPU device. It helps to
|
||||
* control registry at GPU level of granularity. It accepts a semicolon
|
||||
* separated list of key=value pairs. The first key value pair MUST be
|
||||
* "pci=DDDD:BB:DD.F;" where DDDD is Domain, BB is Bus Id, DD is device slot
|
||||
* number and F is the Function. This PCI BDF is used to identify which GPU to
|
||||
* assign the registry keys that follows next.
|
||||
* If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT
|
||||
* found, then all the registry keys that follows are skipped, until we find next
|
||||
 * valid pci identifier "pci=DDDD:BB:DD.F;". Following are the valid formats for
|
||||
* the value of the "pci" string:
|
||||
* 1) bus:slot : Domain and function defaults to 0.
|
||||
* 2) domain:bus:slot : Function defaults to 0.
|
||||
* 3) domain:bus:slot.func : Complete PCI dev id string.
|
||||
*
|
||||
* For each of the registry keys that follows, key name is checked against the
|
||||
* table of static options; if a match is found, the static option value is
|
||||
* overridden, but invalid options remain invalid. Pairs that do not match an
|
||||
* entry in the static option table are passed on to the RM directly.
|
||||
*
|
||||
* Format:
|
||||
*
|
||||
* NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;<key=value>;..; \
|
||||
* pci=DDDD:BB:DD.F;<key=value>;..;"
|
||||
*/
|
||||
|
||||
#define __NV_REGISTRY_DWORDS_PER_DEVICE RegistryDwordsPerDevice
|
||||
#define NV_REG_REGISTRY_DWORDS_PER_DEVICE NV_REG_STRING(__NV_REGISTRY_DWORDS_PER_DEVICE)
|
||||
|
||||
#define __NV_RM_MSG RmMsg
|
||||
#define NV_RM_MSG NV_REG_STRING(__NV_RM_MSG)
|
||||
|
||||
/*
|
||||
* Option: UsePageAttributeTable
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* Enable/disable use of the page attribute table (PAT) available in
|
||||
* modern x86/x86-64 processors to set the effective memory type of memory
|
||||
* mappings to write-combining (WC).
|
||||
*
|
||||
* If enabled, an x86 processor with PAT support is present and the host
|
||||
* system's Linux kernel did not configure one of the PAT entries to
|
||||
* indicate the WC memory type, the driver will change the second entry in
|
||||
* the PAT from its default (write-through (WT)) to WC at module load
|
||||
* time. If the kernel did update one of the PAT entries, the driver will
|
||||
* not modify the PAT.
|
||||
*
|
||||
* In both cases, the driver will honor attempts to map memory with the WC
|
||||
* memory type by selecting the appropriate PAT entry using the correct
|
||||
* set of PTE flags.
|
||||
*
|
||||
* Possible values:
|
||||
*
|
||||
* ~0 = use the NVIDIA driver's default logic (default)
|
||||
* 1 = enable use of the PAT for WC mappings.
|
||||
* 0 = disable use of the PAT for WC mappings.
|
||||
*/
|
||||
|
||||
#define __NV_USE_PAGE_ATTRIBUTE_TABLE UsePageAttributeTable
|
||||
#define NV_USE_PAGE_ATTRIBUTE_TABLE NV_REG_STRING(__NV_USE_PAGE_ATTRIBUTE_TABLE)
|
||||
|
||||
/*
|
||||
* Option: EnableMSI
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When this option is enabled and the host kernel supports the MSI feature,
|
||||
* the NVIDIA driver will enable the PCI-E MSI capability of GPUs with the
|
||||
* support for this feature instead of using PCI-E wired interrupt.
|
||||
*
|
||||
* Possible Values:
|
||||
*
|
||||
* 0 = disable MSI interrupt
|
||||
* 1 = enable MSI interrupt (default)
|
||||
*
|
||||
*/
|
||||
|
||||
#define __NV_ENABLE_MSI EnableMSI
|
||||
#define NV_REG_ENABLE_MSI NV_REG_STRING(__NV_ENABLE_MSI)
|
||||
|
||||
/*
|
||||
* Option: RegisterForACPIEvents
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When this option is enabled, the NVIDIA driver will register with the
|
||||
* ACPI subsystem to receive notification of ACPI events.
|
||||
*
|
||||
* Possible values:
|
||||
*
|
||||
* 1 - register for ACPI events (default)
|
||||
* 0 - do not register for ACPI events
|
||||
*/
|
||||
|
||||
#define __NV_REGISTER_FOR_ACPI_EVENTS RegisterForACPIEvents
|
||||
#define NV_REG_REGISTER_FOR_ACPI_EVENTS NV_REG_STRING(__NV_REGISTER_FOR_ACPI_EVENTS)
|
||||
|
||||
/*
|
||||
* Option: EnablePCIeGen3
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* Due to interoperability problems seen with Kepler PCIe Gen3 capable GPUs
|
||||
* when configured on SandyBridge E desktop platforms, NVIDIA feels that
|
||||
* delivering a reliable, high-quality experience is not currently possible in
|
||||
* PCIe Gen3 mode on all PCIe Gen3 platforms. Therefore, Quadro, Tesla and
|
||||
* NVS Kepler products operate in PCIe Gen2 mode by default. You may use this
|
||||
* option to enable PCIe Gen3 support.
|
||||
*
|
||||
* This is completely unsupported!
|
||||
*
|
||||
* Possible Values:
|
||||
*
|
||||
* 0: disable PCIe Gen3 support (default)
|
||||
* 1: enable PCIe Gen3 support
|
||||
*/
|
||||
|
||||
#define __NV_ENABLE_PCIE_GEN3 EnablePCIeGen3
|
||||
#define NV_REG_ENABLE_PCIE_GEN3 NV_REG_STRING(__NV_ENABLE_PCIE_GEN3)
|
||||
|
||||
/*
|
||||
* Option: MemoryPoolSize
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When set to a non-zero value, this option specifies the size of the
|
||||
* memory pool, given as a multiple of 1 GB, created on VMware ESXi to
|
||||
* satisfy any system memory allocations requested by the NVIDIA kernel
|
||||
* module.
|
||||
*/
|
||||
|
||||
#define __NV_MEMORY_POOL_SIZE MemoryPoolSize
|
||||
#define NV_REG_MEMORY_POOL_SIZE NV_REG_STRING(__NV_MEMORY_POOL_SIZE)
|
||||
|
||||
/*
|
||||
* Option: KMallocHeapMaxSize
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When set to a non-zero value, this option specifies the maximum size of the
|
||||
* heap memory space reserved for kmalloc operations. Given as a
|
||||
* multiple of 1 MB created on VMware ESXi to satisfy any system memory
|
||||
* allocations requested by the NVIDIA kernel module.
|
||||
*/
|
||||
|
||||
#define __NV_KMALLOC_HEAP_MAX_SIZE KMallocHeapMaxSize
|
||||
#define NV_KMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_KMALLOC_HEAP_MAX_SIZE)
|
||||
|
||||
/*
|
||||
* Option: VMallocHeapMaxSize
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When set to a non-zero value, this option specifies the maximum size of the
|
||||
* heap memory space reserved for vmalloc operations. Given as a
|
||||
* multiple of 1 MB created on VMware ESXi to satisfy any system memory
|
||||
* allocations requested by the NVIDIA kernel module.
|
||||
*/
|
||||
|
||||
#define __NV_VMALLOC_HEAP_MAX_SIZE VMallocHeapMaxSize
|
||||
#define NV_VMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_VMALLOC_HEAP_MAX_SIZE)
|
||||
|
||||
/*
|
||||
* Option: IgnoreMMIOCheck
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When this option is enabled, the NVIDIA kernel module will ignore
|
||||
* MMIO limit check during device probe on VMWare ESXi kernel. This is
|
||||
* typically necessary when VMware ESXi MMIO limit differs between any
|
||||
* base version and its updates. Customer using updates can set regkey
|
||||
* to avoid probe failure.
|
||||
*/
|
||||
|
||||
#define __NV_IGNORE_MMIO_CHECK IgnoreMMIOCheck
|
||||
#define NV_REG_IGNORE_MMIO_CHECK NV_REG_STRING(__NV_IGNORE_MMIO_CHECK)
|
||||
|
||||
/*
|
||||
* Option: TCEBypassMode
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When this option is enabled, the NVIDIA kernel module will attempt to setup
|
||||
* all GPUs in "TCE bypass mode", in which DMA mappings of system memory bypass
|
||||
* the IOMMU/TCE remapping hardware on IBM POWER systems. This is typically
|
||||
* necessary for CUDA applications in which large system memory mappings may
|
||||
* exceed the default TCE remapping capacity when operated in non-bypass mode.
|
||||
*
|
||||
* This option has no effect on non-POWER platforms.
|
||||
*
|
||||
* Possible Values:
|
||||
*
|
||||
* 0: system default TCE mode on all GPUs
|
||||
* 1: enable TCE bypass mode on all GPUs
|
||||
* 2: disable TCE bypass mode on all GPUs
|
||||
*/
|
||||
#define __NV_TCE_BYPASS_MODE TCEBypassMode
|
||||
#define NV_REG_TCE_BYPASS_MODE NV_REG_STRING(__NV_TCE_BYPASS_MODE)
|
||||
|
||||
#define NV_TCE_BYPASS_MODE_DEFAULT 0
|
||||
#define NV_TCE_BYPASS_MODE_ENABLE 1
|
||||
#define NV_TCE_BYPASS_MODE_DISABLE 2
|
||||
|
||||
/*
|
||||
* Option: pci
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* On Unix platforms, per GPU based registry key can be specified as:
|
||||
* NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F,<per-gpu registry keys>".
|
||||
* where DDDD:BB:DD.F refers to Domain:Bus:Device.Function.
|
||||
* We need this key "pci" to identify what follows next is a PCI BDF identifier,
|
||||
* for which the registry keys are to be applied.
|
||||
*
|
||||
* This define is not used on non-UNIX platforms.
|
||||
*
|
||||
* Possible Formats for value:
|
||||
*
|
||||
* 1) bus:slot : Domain and function defaults to 0.
|
||||
* 2) domain:bus:slot : Function defaults to 0.
|
||||
* 3) domain:bus:slot.func : Complete PCI BDF identifier string.
|
||||
*/
|
||||
#define __NV_PCI_DEVICE_BDF pci
|
||||
#define NV_REG_PCI_DEVICE_BDF NV_REG_STRING(__NV_PCI_DEVICE_BDF)
|
||||
|
||||
/*
|
||||
* Option: EnableStreamMemOPs
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When this option is enabled, the CUDA driver will enable support for
|
||||
* CUDA Stream Memory Operations in user-mode applications, which are so
|
||||
* far required to be disabled by default due to limited support in
|
||||
* devtools.
|
||||
*
|
||||
* Note: this is treated as a hint. MemOPs may still be left disabled by CUDA
|
||||
* driver for other reasons.
|
||||
*
|
||||
* Possible Values:
|
||||
*
|
||||
* 0 = disable feature (default)
|
||||
* 1 = enable feature
|
||||
*/
|
||||
#define __NV_ENABLE_STREAM_MEMOPS EnableStreamMemOPs
|
||||
#define NV_REG_ENABLE_STREAM_MEMOPS NV_REG_STRING(__NV_ENABLE_STREAM_MEMOPS)
|
||||
|
||||
/*
|
||||
* Option: EnableUserNUMAManagement
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When this option is enabled, the NVIDIA kernel module will require the
|
||||
* user-mode NVIDIA Persistence daemon to manage the onlining and offlining
|
||||
* of its NUMA device memory.
|
||||
*
|
||||
* This option has no effect on platforms that do not support onlining
|
||||
* device memory to a NUMA node (this feature is only supported on certain
|
||||
* POWER9 systems).
|
||||
*
|
||||
* Possible Values:
|
||||
*
|
||||
* 0: disable user-mode NUMA management
|
||||
* 1: enable user-mode NUMA management (default)
|
||||
*/
|
||||
#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement
|
||||
#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT)
|
||||
|
||||
/*
|
||||
* Option: GpuBlacklist
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* This option accepts a list of blacklisted GPUs, separated by commas, that
|
||||
* cannot be attached or used. Each blacklisted GPU is identified by a UUID in
|
||||
* the ASCII format with leading "GPU-". An exact match is required; no partial
|
||||
* UUIDs. This regkey is deprecated and will be removed in the future. Use
|
||||
* NV_REG_EXCLUDED_GPUS instead.
|
||||
*/
|
||||
#define __NV_GPU_BLACKLIST GpuBlacklist
|
||||
#define NV_REG_GPU_BLACKLIST NV_REG_STRING(__NV_GPU_BLACKLIST)
|
||||
|
||||
/*
|
||||
* Option: ExcludedGpus
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* This option accepts a list of excluded GPUs, separated by commas, that
|
||||
* cannot be attached or used. Each excluded GPU is identified by a UUID in
|
||||
* the ASCII format with leading "GPU-". An exact match is required; no partial
|
||||
* UUIDs.
|
||||
*/
|
||||
#define __NV_EXCLUDED_GPUS ExcludedGpus
|
||||
#define NV_REG_EXCLUDED_GPUS NV_REG_STRING(__NV_EXCLUDED_GPUS)
|
||||
|
||||
/*
|
||||
* Option: NvLinkDisable
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When this option is enabled, the NVIDIA kernel module will not attempt to
|
||||
* initialize or train NVLink connections for any GPUs. System reboot is required
|
||||
 * for changes to take effect.
|
||||
*
|
||||
* This option has no effect if no GPUs support NVLink.
|
||||
*
|
||||
* Possible Values:
|
||||
*
|
||||
* 0: Do not disable NVLink (default)
|
||||
* 1: Disable NVLink
|
||||
*/
|
||||
#define __NV_NVLINK_DISABLE NvLinkDisable
|
||||
#define NV_REG_NVLINK_DISABLE NV_REG_STRING(__NV_NVLINK_DISABLE)
|
||||
|
||||
/*
|
||||
* Option: RestrictProfilingToAdminUsers
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When this option is enabled, the NVIDIA kernel module will prevent users
|
||||
* without administrative access (i.e., the CAP_SYS_ADMIN capability) from
|
||||
* using GPU performance counters.
|
||||
*
|
||||
* Possible Values:
|
||||
*
|
||||
* 0: Do not restrict GPU counters (default)
|
||||
* 1: Restrict GPU counters to system administrators only
|
||||
*/
|
||||
|
||||
#define __NV_RM_PROFILING_ADMIN_ONLY RmProfilingAdminOnly
|
||||
#define __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER RestrictProfilingToAdminUsers
|
||||
#define NV_REG_RM_PROFILING_ADMIN_ONLY NV_REG_STRING(__NV_RM_PROFILING_ADMIN_ONLY)
|
||||
|
||||
/*
|
||||
* Option: TemporaryFilePath
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When specified, this option changes the location in which the
|
||||
* NVIDIA kernel module will create unnamed temporary files (e.g. to
|
||||
* save the contents of video memory in). The indicated file must
|
||||
* be a directory. By default, temporary files are created in /tmp.
|
||||
*/
|
||||
#define __NV_TEMPORARY_FILE_PATH TemporaryFilePath
|
||||
#define NV_REG_TEMPORARY_FILE_PATH NV_REG_STRING(__NV_TEMPORARY_FILE_PATH)
|
||||
|
||||
/*
|
||||
* Option: PreserveVideoMemoryAllocations
|
||||
*
|
||||
* If enabled, this option prompts the NVIDIA kernel module to save and
|
||||
* restore all video memory allocations across system power management
|
||||
* cycles, i.e. suspend/resume and hibernate/restore. Otherwise,
|
||||
* only select allocations are preserved.
|
||||
*
|
||||
* Possible Values:
|
||||
*
|
||||
* 0: Preserve only select video memory allocations (default)
|
||||
* 1: Preserve all video memory allocations
|
||||
*/
|
||||
#define __NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS PreserveVideoMemoryAllocations
|
||||
#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \
|
||||
NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS)
|
||||
|
||||
|
||||
/*
|
||||
* Option: EnableS0ixPowerManagement
|
||||
*
|
||||
* When this option is enabled, the NVIDIA driver will use S0ix-based
|
||||
* power management for system suspend/resume, if both the platform and
|
||||
* the GPU support S0ix.
|
||||
*
|
||||
* During system suspend, if S0ix is enabled and
|
||||
* video memory usage is above the threshold configured by
|
||||
* 'S0ixPowerManagementVideoMemoryThreshold', video memory will be kept
|
||||
* in self-refresh mode while the rest of the GPU is powered down.
|
||||
*
|
||||
* Otherwise, the driver will copy video memory contents to system memory
|
||||
* and power off the video memory along with the GPU.
|
||||
*
|
||||
* Possible Values:
|
||||
*
|
||||
* 0: Disable S0ix based power management (default)
|
||||
* 1: Enable S0ix based power management
|
||||
*/
|
||||
|
||||
#define __NV_ENABLE_S0IX_POWER_MANAGEMENT EnableS0ixPowerManagement
|
||||
#define NV_REG_ENABLE_S0IX_POWER_MANAGEMENT \
|
||||
NV_REG_STRING(__NV_ENABLE_S0IX_POWER_MANAGEMENT)
|
||||
|
||||
/*
|
||||
* Option: S0ixPowerManagementVideoMemoryThreshold
|
||||
*
|
||||
* This option controls the threshold that the NVIDIA driver will use during
|
||||
* S0ix-based system power management.
|
||||
*
|
||||
* When S0ix is enabled and the system is suspended, the driver will
|
||||
* compare the amount of video memory in use with this threshold,
|
||||
* to decide whether to keep video memory in self-refresh or copy video
|
||||
* memory content to system memory.
|
||||
*
|
||||
* See the 'EnableS0ixPowerManagement' option.
|
||||
*
|
||||
* Values are expressed in Megabytes (1048576 bytes).
|
||||
*
|
||||
* Default value for this option is 256MB.
|
||||
*
|
||||
*/
|
||||
#define __NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
|
||||
S0ixPowerManagementVideoMemoryThreshold
|
||||
#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
|
||||
NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
|
||||
|
||||
|
||||
/*
|
||||
* Option: DynamicPowerManagement
|
||||
*
|
||||
* This option controls how aggressively the NVIDIA kernel module will manage
|
||||
* GPU power through kernel interfaces.
|
||||
*
|
||||
* Possible Values:
|
||||
*
|
||||
 *    0: Never allow the GPU to be powered down.
|
||||
* 1: Power down the GPU when it is not initialized.
|
||||
* 2: Power down the GPU after it has been inactive for some time.
|
||||
* 3: (Default) Power down the GPU after a period of inactivity (i.e.,
|
||||
* mode 2) on Ampere or later notebooks. Otherwise, do not power down
|
||||
* the GPU.
|
||||
*/
|
||||
#define __NV_DYNAMIC_POWER_MANAGEMENT DynamicPowerManagement
|
||||
#define NV_REG_DYNAMIC_POWER_MANAGEMENT \
|
||||
NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT)
|
||||
|
||||
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_NEVER 0
|
||||
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_COARSE 1
|
||||
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_FINE 2
|
||||
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_DEFAULT 3
|
||||
|
||||
/*
|
||||
* Option: DynamicPowerManagementVideoMemoryThreshold
|
||||
*
|
||||
* This option controls the threshold that the NVIDIA driver will use
|
||||
* when selecting the dynamic power management scheme.
|
||||
*
|
||||
* When the driver detects that the GPU is idle, it will compare the amount
|
||||
* of video memory in use with this threshold.
|
||||
*
|
||||
* If the current video memory usage is less than the threshold, the
|
||||
* driver may preserve video memory contents in system memory and power off
|
||||
* the video memory along with the GPU itself, if supported. Otherwise,
|
||||
* the video memory will be kept in self-refresh mode while powering down
|
||||
* the rest of the GPU, if supported.
|
||||
*
|
||||
* Values are expressed in Megabytes (1048576 bytes).
|
||||
*
|
||||
* If the requested value is greater than 200MB (the default), then it
|
||||
* will be capped to 200MB.
|
||||
*/
|
||||
#define __NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
|
||||
DynamicPowerManagementVideoMemoryThreshold
|
||||
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
|
||||
NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
|
||||
|
||||
/*
|
||||
* Option: RegisterPCIDriver
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When this option is enabled, the NVIDIA driver will register with
|
||||
* PCI subsystem.
|
||||
*
|
||||
* Possible values:
|
||||
*
|
||||
* 1 - register as PCI driver (default)
|
||||
* 0 - do not register as PCI driver
|
||||
*/
|
||||
|
||||
#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver
|
||||
#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER)
|
||||
|
||||
/*
|
||||
* Option: EnablePCIERelaxedOrderingMode
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When this option is enabled, the registry key RmSetPCIERelaxedOrdering will
|
||||
* be set to NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE, causing
|
||||
* every device to set the relaxed ordering bit to 1 in all outbound MWr
|
||||
* transaction-layer packets. This is equivalent to setting the regkey to
|
||||
* FORCE_ENABLE as a non-per-device registry key.
|
||||
*
|
||||
* Possible values:
|
||||
* 0 - Do not enable PCIe TLP relaxed ordering bit-setting (default)
|
||||
* 1 - Enable PCIe TLP relaxed ordering bit-setting
|
||||
*/
|
||||
#define __NV_ENABLE_PCIE_RELAXED_ORDERING_MODE EnablePCIERelaxedOrderingMode
|
||||
#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \
|
||||
NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE)
|
||||
|
||||
/*
|
||||
* Option: EnableGpuFirmware
|
||||
*
|
||||
* Description:
|
||||
*
|
||||
* When this option is enabled, the NVIDIA driver will enable use of GPU
|
||||
* firmware.
|
||||
*
|
||||
* Possible mode values:
|
||||
* 0 - Do not enable GPU firmware
|
||||
* 1 - Enable GPU firmware
|
||||
* 2 - (Default) Use the default enablement policy for GPU firmware
|
||||
*
|
||||
* Setting this to anything other than 2 will alter driver firmware-
|
||||
* enablement policies, possibly disabling GPU firmware where it would
|
||||
* have otherwise been enabled by default.
|
||||
*
|
||||
* If this key is set globally to the system, the driver may still attempt
|
||||
* to apply some policies to maintain uniform firmware modes across all
|
||||
 * GPUs. This may result in the driver failing initialization on some GPUs
|
||||
* to maintain such a policy.
|
||||
*
|
||||
* If this key is set using NVreg_RegistryDwordsPerDevice, then the driver
|
||||
* will attempt to honor whatever configuration is specified without applying
|
||||
 * additional policies. This may also result in failed GPU initializations if
|
||||
* the configuration is not possible (for example if the firmware is missing
|
||||
* from the filesystem, or the GPU is not capable).
|
||||
*
|
||||
* Policy bits:
|
||||
*
|
||||
* POLICY_ALLOW_FALLBACK:
|
||||
* As the normal behavior is to fail GPU initialization if this registry
|
||||
* entry is set in such a way that results in an invalid configuration, if
|
||||
* instead the user would like the driver to automatically try to fallback
|
||||
* to initializing the failing GPU with firmware disabled, then this bit can
|
||||
* be set (ex: 0x11 means try to enable GPU firmware but fall back if needed).
|
||||
* Note that this can result in a mixed mode configuration (ex: GPU0 has
|
||||
* firmware enabled, but GPU1 does not).
|
||||
*
|
||||
*/
|
||||
|
||||
#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE)
|
||||
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002
|
||||
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
|
||||
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE_INVALID_VALUE 0xFFFFFFFF
|
||||
|
||||
/*
|
||||
* Option: EnableGpuFirmwareLogs
|
||||
*
|
||||
* When this option is enabled, the NVIDIA driver will send GPU firmware logs
|
||||
* to the system log, when possible.
|
||||
*
|
||||
* Possible values:
|
||||
* 0 - Do not send GPU firmware logs to the system log
|
||||
* 1 - Enable sending of GPU firmware logs to the system log
|
||||
* 2 - (Default) Enable sending of GPU firmware logs to the system log for
|
||||
* the debug kernel driver build only
|
||||
*/
|
||||
#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS)
|
||||
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001
|
||||
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002
|
||||
|
||||
/*
|
||||
* Option: EnableDbgBreakpoint
|
||||
*
|
||||
* When this option is set to a non-zero value, and the kernel is configured
|
||||
* appropriately, assertions within resman will trigger a CPU breakpoint (e.g.,
|
||||
* INT3 on x86_64), assumed to be caught by an attached debugger.
|
||||
*
|
||||
* When this option is set to the value zero (the default), assertions within
|
||||
* resman will print to the system log, but no CPU breakpoint will be triggered.
|
||||
*/
|
||||
#define __NV_ENABLE_DBG_BREAKPOINT EnableDbgBreakpoint
|
||||
|
||||
|
||||
/*
|
||||
* Option: OpenRmEnableUnsupportedGpus
|
||||
*
|
||||
* Open nvidia.ko support for features beyond what is used on Data Center GPUs
|
||||
* is still fairly immature, so for now require users to opt into use of open
|
||||
* nvidia.ko with a special registry key, if not on a Data Center GPU.
|
||||
*/
|
||||
|
||||
#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus
|
||||
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS NV_REG_STRING(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS)
|
||||
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE 0x00000000
|
||||
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_ENABLE 0x00000001
|
||||
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE
|
||||
|
||||
|
||||
#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)
|
||||
|
||||
/*
|
||||
*---------registry key parameter declarations--------------
|
||||
*/
|
||||
|
||||
NV_DEFINE_REG_ENTRY(__NV_RESMAN_DEBUG_LEVEL, ~0);
|
||||
NV_DEFINE_REG_ENTRY(__NV_RM_LOGON_RC, 1);
|
||||
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MODIFY_DEVICE_FILES, 1);
|
||||
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_UID, 0);
|
||||
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_GID, 0);
|
||||
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_MODE, 0666);
|
||||
NV_DEFINE_REG_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, 1);
|
||||
NV_DEFINE_REG_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE, ~0);
|
||||
NV_DEFINE_REG_ENTRY(__NV_REGISTER_FOR_ACPI_EVENTS, 1);
|
||||
NV_DEFINE_REG_ENTRY(__NV_ENABLE_PCIE_GEN3, 0);
|
||||
NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1);
|
||||
NV_DEFINE_REG_ENTRY(__NV_TCE_BYPASS_MODE, NV_TCE_BYPASS_MODE_DEFAULT);
|
||||
NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0);
|
||||
NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1);
|
||||
NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0);
|
||||
|
||||
NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0);
|
||||
NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 256);
|
||||
|
||||
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3);
|
||||
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200);
|
||||
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE);
|
||||
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG);
|
||||
NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT);
|
||||
|
||||
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1);
|
||||
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0);
|
||||
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_KMALLOC_HEAP_MAX_SIZE, 0);
|
||||
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0);
|
||||
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0);
|
||||
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0);
|
||||
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
|
||||
|
||||
|
||||
|
||||
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1);
|
||||
|
||||
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0);
|
||||
|
||||
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL);
|
||||
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL);
|
||||
NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL);
|
||||
NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL);
|
||||
NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL);
|
||||
NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL);
|
||||
|
||||
/*
|
||||
*----------------registry database definition----------------------
|
||||
*/
|
||||
|
||||
/*
|
||||
* You can enable any of the registry options disabled by default by
|
||||
* editing their respective entries in the table below. The last field
|
||||
* determines if the option is considered valid - in order for the
|
||||
* changes to take effect, you need to recompile and reload the NVIDIA
|
||||
* kernel module.
|
||||
*/
|
||||
/*
 * Registry parameter table consumed by the module-parameter machinery.
 * Each entry binds a registry key name to the backing variable declared
 * via NV_DEFINE_REG_ENTRY above; the table is terminated by a
 * {NULL, NULL} sentinel.
 */
nv_parm_t nv_parms[] = {
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RESMAN_DEBUG_LEVEL),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_LOGON_RC),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_UID),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_GID),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_MODE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_FOR_ACPI_EVENTS),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_GEN3),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MEMORY_POOL_SIZE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_KMALLOC_HEAP_MAX_SIZE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_VMALLOC_HEAP_MAX_SIZE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IGNORE_MMIO_CHECK),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_TCE_BYPASS_MODE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_STREAM_MEMOPS),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_USER_NUMA_MANAGEMENT),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_NVLINK_DISABLE),
    /* Exposed to users under a different (non-internal) parameter name. */
    NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY,
                                             __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS),

    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),

    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS),
    {NULL, NULL}  /* sentinel: end of table */
};
|
||||
|
||||
#elif defined(NVRM)
|
||||
|
||||
extern nv_parm_t nv_parms[];
|
||||
|
||||
#endif /* NV_DEFINE_REGISTRY_KEY_TABLE */
|
||||
|
||||
#endif /* _RM_REG_H_ */
|
||||
89
kernel-open/nvidia/nv-report-err.c
Normal file
89
kernel-open/nvidia/nv-report-err.c
Normal file
@@ -0,0 +1,89 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
#include "nv-linux.h"
|
||||
#include "os-interface.h"
|
||||
|
||||
#include "nv-report-err.h"
|
||||
|
||||
nv_report_error_cb_t nv_error_cb_handle = NULL;
|
||||
|
||||
/*
 * Register the (single) callback used to report XID errors.
 *
 * Returns 0 on success, -EINVAL if report_error_cb is NULL, or -EBUSY
 * if a callback is already registered.
 */
int nv_register_error_cb(nv_report_error_cb_t report_error_cb)
{
    int rc = 0;

    if (report_error_cb == NULL)
    {
        rc = -EINVAL;
    }
    else if (nv_error_cb_handle != NULL)
    {
        /* Only one consumer may be registered at a time. */
        rc = -EBUSY;
    }
    else
    {
        nv_error_cb_handle = report_error_cb;
    }

    return rc;
}
|
||||
|
||||
EXPORT_SYMBOL(nv_register_error_cb);
|
||||
|
||||
int nv_unregister_error_cb(void)
|
||||
{
|
||||
if (nv_error_cb_handle == NULL)
|
||||
return -EPERM;
|
||||
|
||||
nv_error_cb_handle = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(nv_unregister_error_cb);
|
||||
|
||||
struct pci_dev;
|
||||
|
||||
void nv_report_error(
|
||||
struct pci_dev *dev,
|
||||
NvU32 error_number,
|
||||
const char *format,
|
||||
va_list ap
|
||||
)
|
||||
{
|
||||
va_list ap_copy;
|
||||
char *buffer;
|
||||
int length = 0;
|
||||
int status = NV_OK;
|
||||
|
||||
if (nv_error_cb_handle != NULL)
|
||||
{
|
||||
va_copy(ap_copy, ap);
|
||||
length = vsnprintf(NULL, 0, format, ap);
|
||||
va_end(ap_copy);
|
||||
|
||||
if (length > 0)
|
||||
{
|
||||
status = os_alloc_mem((void *)&buffer, (length + 1)*sizeof(char));
|
||||
|
||||
if (status == NV_OK)
|
||||
{
|
||||
vsnprintf(buffer, length, format, ap);
|
||||
nv_error_cb_handle(dev, error_number, buffer, length + 1);
|
||||
os_free_mem(buffer);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
66
kernel-open/nvidia/nv-report-err.h
Normal file
66
kernel-open/nvidia/nv-report-err.h
Normal file
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NV_REPORT_ERR_H_
|
||||
#define _NV_REPORT_ERR_H_
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Callback definition for obtaining XID error string and data.
|
||||
*
|
||||
* @param[in] pci_dev *
|
||||
 *     Structure describing the GPU PCI device.
|
||||
* @param[in] uint32_t
|
||||
* XID number
|
||||
* @param[in] char *
|
||||
* Error string with HWERR info.
|
||||
* @param[in] int
|
||||
* Length of error string.
|
||||
*/
|
||||
typedef void (*nv_report_error_cb_t)(struct pci_dev *, uint32_t, char *, int);
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Register callback function to obtain XID error string and data.
|
||||
*
|
||||
* @param[in] report_error_cb
|
||||
 *     A function pointer to receive the callback.
|
||||
*
|
||||
* @return
|
||||
* 0 upon successful completion.
|
||||
* -EINVAL callback handle is NULL.
|
||||
* -EBUSY callback handle is already registered.
|
||||
*/
|
||||
int nv_register_error_cb(nv_report_error_cb_t report_error_cb);
|
||||
|
||||
/*
|
||||
* @brief
|
||||
* Unregisters callback function handle.
|
||||
*
|
||||
* @return
|
||||
* 0 upon successful completion.
|
||||
* -EPERM unregister not permitted on NULL callback handle.
|
||||
*/
|
||||
int nv_unregister_error_cb(void);
|
||||
|
||||
#endif /* _NV_REPORT_ERR_H_ */
|
||||
201
kernel-open/nvidia/nv-rsync.c
Normal file
201
kernel-open/nvidia/nv-rsync.c
Normal file
@@ -0,0 +1,201 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nv-linux.h"
|
||||
#include "nv-rsync.h"
|
||||
|
||||
nv_rsync_info_t g_rsync_info;
|
||||
|
||||
void nv_init_rsync_info(
|
||||
void
|
||||
)
|
||||
{
|
||||
g_rsync_info.relaxed_ordering_mode = NV_FALSE;
|
||||
g_rsync_info.usage_count = 0;
|
||||
g_rsync_info.data = NULL;
|
||||
NV_INIT_MUTEX(&g_rsync_info.lock);
|
||||
}
|
||||
|
||||
void nv_destroy_rsync_info(
|
||||
void
|
||||
)
|
||||
{
|
||||
WARN_ON(g_rsync_info.data);
|
||||
WARN_ON(g_rsync_info.usage_count);
|
||||
WARN_ON(g_rsync_info.relaxed_ordering_mode);
|
||||
}
|
||||
|
||||
int nv_get_rsync_info(
|
||||
void
|
||||
)
|
||||
{
|
||||
int mode;
|
||||
int rc = 0;
|
||||
|
||||
down(&g_rsync_info.lock);
|
||||
|
||||
if (g_rsync_info.usage_count == 0)
|
||||
{
|
||||
if (g_rsync_info.get_relaxed_ordering_mode)
|
||||
{
|
||||
rc = g_rsync_info.get_relaxed_ordering_mode(&mode,
|
||||
g_rsync_info.data);
|
||||
if (rc != 0)
|
||||
{
|
||||
goto done;
|
||||
}
|
||||
|
||||
g_rsync_info.relaxed_ordering_mode = !!mode;
|
||||
}
|
||||
}
|
||||
|
||||
g_rsync_info.usage_count++;
|
||||
|
||||
done:
|
||||
up(&g_rsync_info.lock);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
 * Drop a reference taken by nv_get_rsync_info().
 *
 * When the last reference is released (usage_count reaches 0), the cached
 * relaxed-ordering mode is handed back to the registered driver via
 * put_relaxed_ordering_mode() (if any) and reset to NV_FALSE.
 */
void nv_put_rsync_info(
    void
)
{
    int mode;

    down(&g_rsync_info.lock);

    g_rsync_info.usage_count--;

    if (g_rsync_info.usage_count == 0)
    {
        if (g_rsync_info.put_relaxed_ordering_mode)
        {
            mode = g_rsync_info.relaxed_ordering_mode;
            g_rsync_info.put_relaxed_ordering_mode(mode, g_rsync_info.data);
            g_rsync_info.relaxed_ordering_mode = NV_FALSE;
        }
    }

    up(&g_rsync_info.lock);
}
|
||||
|
||||
/*
 * Register an external rsync driver's callbacks with this module.
 *
 * Fails with -EBUSY if a driver is already registered or if any references
 * are currently outstanding (registration would race with users that have
 * already cached the relaxed-ordering mode).
 *
 * Returns 0 on success, -EBUSY on conflict.
 */
int nv_register_rsync_driver(
    int (*get_relaxed_ordering_mode)(int *mode, void *data),
    void (*put_relaxed_ordering_mode)(int mode, void *data),
    void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
    void *data
)
{
    int rc = 0;

    down(&g_rsync_info.lock);

    /* get_relaxed_ordering_mode doubles as the "driver registered" flag. */
    if (g_rsync_info.get_relaxed_ordering_mode != NULL)
    {
        rc = -EBUSY;
        goto done;
    }

    if (g_rsync_info.usage_count != 0)
    {
        rc = -EBUSY;
        goto done;
    }

    g_rsync_info.get_relaxed_ordering_mode = get_relaxed_ordering_mode;
    g_rsync_info.put_relaxed_ordering_mode = put_relaxed_ordering_mode;
    g_rsync_info.wait_for_rsync = wait_for_rsync;
    g_rsync_info.data = data;

done:
    up(&g_rsync_info.lock);

    return rc;
}
|
||||
|
||||
/*
 * Unregister a previously registered rsync driver.
 *
 * Warns if references are still outstanding or if the supplied callbacks /
 * cookie do not match what was registered, then clears all callbacks
 * unconditionally.
 */
void nv_unregister_rsync_driver(
    int (*get_relaxed_ordering_mode)(int *mode, void *data),
    void (*put_relaxed_ordering_mode)(int mode, void *data),
    void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
    void *data
)
{
    down(&g_rsync_info.lock);

    /* Unregistering while users hold references is a caller bug. */
    WARN_ON(g_rsync_info.usage_count != 0);

    /* The unregistering driver must be the one that registered. */
    WARN_ON(g_rsync_info.get_relaxed_ordering_mode !=
            get_relaxed_ordering_mode);
    WARN_ON(g_rsync_info.put_relaxed_ordering_mode !=
            put_relaxed_ordering_mode);
    WARN_ON(g_rsync_info.wait_for_rsync != wait_for_rsync);
    WARN_ON(g_rsync_info.data != data);

    g_rsync_info.get_relaxed_ordering_mode = NULL;
    g_rsync_info.put_relaxed_ordering_mode = NULL;
    g_rsync_info.wait_for_rsync = NULL;
    g_rsync_info.data = NULL;

    up(&g_rsync_info.lock);
}
|
||||
|
||||
/*
 * Return the cached relaxed-ordering mode for an open device.
 * Lock-free read; safe only while the per-open rsync reference is held
 * (see comment below).
 */
NvBool nv_get_rsync_relaxed_ordering_mode(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    /* shouldn't be called without opening a device */
    WARN_ON(NV_ATOMIC_READ(nvl->usage_count) == 0);

    /*
     * g_rsync_info.relaxed_ordering_mode can be safely accessed outside of
     * g_rsync_info.lock once a device is opened. During nvidia_open(), we
     * lock the relaxed ordering state by ref-counting the rsync module
     * through get_relaxed_ordering_mode.
     */
    return g_rsync_info.relaxed_ordering_mode;
}
|
||||
|
||||
/*
 * If relaxed ordering is enabled, invoke the registered rsync driver's
 * wait callback for this device's PCI function. No-op otherwise.
 * Lock-free; relies on the per-open reference described below.
 */
void nv_wait_for_rsync(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    /* shouldn't be called without opening a device */
    WARN_ON(NV_ATOMIC_READ(nvl->usage_count) == 0);

    /*
     * g_rsync_info.relaxed_ordering_mode can be safely accessed outside of
     * g_rsync_info.lock once a device is opened. During nvidia_open(), we
     * block unregistration of the rsync driver by ref-counting the module
     * through get_relaxed_ordering_mode.
     */
    if (g_rsync_info.relaxed_ordering_mode)
    {
        /* Mode can only be set by a registered driver, so the callback
         * should always be present here. */
        WARN_ON(g_rsync_info.wait_for_rsync == NULL);
        g_rsync_info.wait_for_rsync(nvl->pci_dev, g_rsync_info.data);
    }
}
|
||||
57
kernel-open/nvidia/nv-rsync.h
Normal file
57
kernel-open/nvidia/nv-rsync.h
Normal file
@@ -0,0 +1,57 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NV_RSYNC_H_
|
||||
#define _NV_RSYNC_H_
|
||||
|
||||
#include "nv-linux.h"
|
||||
|
||||
/*
 * Global bookkeeping for an externally registered "rsync" driver.
 * A single instance (g_rsync_info) is shared across all devices.
 */
typedef struct nv_rsync_info
{
    struct semaphore lock;          /* guards all fields below              */
    uint32_t usage_count;           /* outstanding nv_get_rsync_info() refs */
    NvBool relaxed_ordering_mode;   /* cached mode; valid while refs held   */
    /* Callbacks supplied by nv_register_rsync_driver(); NULL when no
     * driver is registered. */
    int (*get_relaxed_ordering_mode)(int *mode, void *data);
    void (*put_relaxed_ordering_mode)(int mode, void *data);
    void (*wait_for_rsync)(struct pci_dev *gpu, void *data);
    void *data;                     /* opaque cookie passed to callbacks    */
} nv_rsync_info_t;
|
||||
|
||||
void nv_init_rsync_info(void);
|
||||
void nv_destroy_rsync_info(void);
|
||||
int nv_get_rsync_info(void);
|
||||
void nv_put_rsync_info(void);
|
||||
int nv_register_rsync_driver(
|
||||
int (*get_relaxed_ordering_mode)(int *mode, void *data),
|
||||
void (*put_relaxed_ordering_mode)(int mode, void *data),
|
||||
void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
|
||||
void *data);
|
||||
void nv_unregister_rsync_driver(
|
||||
int (*get_relaxed_ordering_mode)(int *mode, void *data),
|
||||
void (*put_relaxed_ordering_mode)(int mode, void *data),
|
||||
void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
|
||||
void *data);
|
||||
NvBool nv_get_rsync_relaxed_ordering_mode(nv_state_t *nv);
|
||||
void nv_wait_for_rsync(nv_state_t *nv);
|
||||
|
||||
#endif
|
||||
160
kernel-open/nvidia/nv-usermap.c
Normal file
160
kernel-open/nvidia/nv-usermap.c
Normal file
@@ -0,0 +1,160 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
#include "nv-frontend.h"
|
||||
|
||||
/*
 * Stash mmap parameters in the per-file-descriptor mapping context so a
 * subsequent mmap() on that fd can be validated and completed.
 *
 * For the control device, only the allocation handle and page index are
 * recorded. For actual GPU devices, the mmap/access ranges (and, on online
 * NUMA configurations, the backing page array) are copied from nvuap.
 *
 * Returns NV_OK, NV_ERR_INVALID_ARGUMENT for a bad fd or fd/device
 * mismatch, or NV_ERR_STATE_IN_USE if the fd already has a pending
 * mapping context.
 */
NV_STATUS NV_API_CALL nv_add_mapping_context_to_file(
    nv_state_t *nv,
    nv_usermap_access_params_t *nvuap,
    NvU32 prot,
    void *pAllocPriv,
    NvU64 pageIndex,
    NvU32 fd
)
{
    NV_STATUS status = NV_OK;
    nv_alloc_mapping_context_t *nvamc = NULL;
    nv_file_private_t *nvfp = NULL;
    nv_linux_file_private_t *nvlfp = NULL;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    void *priv = NULL;

    /* Resolve the fd to its driver-private state; priv must be released
     * via nv_put_file_private() on every exit path past this point. */
    nvfp = nv_get_file_private(fd, NV_IS_CTL_DEVICE(nv), &priv);
    if (nvfp == NULL)
        return NV_ERR_INVALID_ARGUMENT;

    nvlfp = nv_get_nvlfp_from_nvfp(nvfp);

    nvamc = &nvlfp->mmap_context;

    /* Only one pending mapping context per fd is allowed at a time. */
    if (nvamc->valid)
    {
        status = NV_ERR_STATE_IN_USE;
        goto done;
    }

    if (NV_IS_CTL_DEVICE(nv))
    {
        nvamc->alloc = pAllocPriv;
        nvamc->page_index = pageIndex;
    }
    else
    {
        /* The fd must have been opened against this same device. */
        if (NV_STATE_PTR(nvlfp->nvptr) != nv)
        {
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }

        nvamc->mmap_start = nvuap->mmap_start;
        nvamc->mmap_size = nvuap->mmap_size;
        /* Page array is only meaningful when NUMA memory is onlined. */
        if (nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE)
        {
            nvamc->page_array = nvuap->page_array;
            nvamc->num_pages = nvuap->num_pages;
        }
        nvamc->access_start = nvuap->access_start;
        nvamc->access_size = nvuap->access_size;
        nvamc->remap_prot_extra = nvuap->remap_prot_extra;
    }

    nvamc->prot = prot;
    /* Publish the context last, after all fields are populated. */
    nvamc->valid = NV_TRUE;

done:
    nv_put_file_private(priv);

    return status;
}
|
||||
|
||||
/*
 * Compute the physical address a user mapping of the given page should
 * target. For contiguous allocations the address is derived from the base
 * page; otherwise it comes from the per-page table entry. Purely
 * arithmetic — no mapping is created here.
 */
NV_STATUS NV_API_CALL nv_alloc_user_mapping(
    nv_state_t *nv,
    void *pAllocPrivate,
    NvU64 pageIndex,
    NvU32 pageOffset,
    NvU64 size,
    NvU32 protect,
    NvU64 *pUserAddress,
    void **ppPrivate
)
{
    nv_alloc_t *at = pAllocPrivate;
    NvU64 base;

    if (at->flags.contig)
        base = at->page_table[0]->phys_addr + (pageIndex * PAGE_SIZE);
    else
        base = at->page_table[pageIndex]->phys_addr;

    *pUserAddress = base + pageOffset;

    return NV_OK;
}
|
||||
|
||||
/*
 * Counterpart to nv_alloc_user_mapping(). Intentionally a no-op on Linux:
 * nv_alloc_user_mapping() only computes an address and allocates nothing,
 * so there is nothing to release here.
 */
NV_STATUS NV_API_CALL nv_free_user_mapping(
    nv_state_t *nv,
    void *pAllocPrivate,
    NvU64 userAddress,
    void *pPrivate
)
{
    return NV_OK;
}
|
||||
|
||||
/*
 * This function adjusts the {mmap,access}_{start,size} fields to reflect
 * platform-specific mechanisms for isolating mappings at a finer
 * granularity than the os_page_size.
 *
 * Returns NV_OK, or NV_ERR_OPERATING_SYSTEM when 4K page isolation is
 * required by the GPU but not supported by this kernel.
 */
NV_STATUS NV_API_CALL nv_get_usermap_access_params(
    nv_state_t *nv,
    nv_usermap_access_params_t *nvuap
)
{
    NvU64 addr = nvuap->addr;
    NvU64 size = nvuap->size;

    nvuap->remap_prot_extra = 0;

    /*
     * Do verification and cache encoding based on the original
     * (ostensibly smaller) mmap request, since accesses should be
     * restricted to that range.
     */
    if (rm_gpu_need_4k_page_isolation(nv) &&
        NV_4K_PAGE_ISOLATION_REQUIRED(addr, size))
    {
#if defined(NV_4K_PAGE_ISOLATION_PRESENT)
        nvuap->remap_prot_extra = NV_PROT_4K_PAGE_ISOLATION;
        nvuap->access_start = (NvU64)NV_4K_PAGE_ISOLATION_ACCESS_START(addr);
        nvuap->access_size = NV_4K_PAGE_ISOLATION_ACCESS_LEN(addr, size);
        nvuap->mmap_start = (NvU64)NV_4K_PAGE_ISOLATION_MMAP_ADDR(addr);
        nvuap->mmap_size = NV_4K_PAGE_ISOLATION_MMAP_LEN(size);
#else
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "4K page isolation required but not available!\n");
        return NV_ERR_OPERATING_SYSTEM;
#endif
    }

    return NV_OK;
}
|
||||
726
kernel-open/nvidia/nv-vm.c
Normal file
726
kernel-open/nvidia/nv-vm.c
Normal file
@@ -0,0 +1,726 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv.h"
|
||||
#include "nv-linux.h"
|
||||
|
||||
/*
 * Mark a physically contiguous run of pages uncached in the kernel's
 * linear mapping, using whichever set_memory/set_pages API this kernel
 * provides. No-op if neither API is present.
 */
static inline void nv_set_contig_memory_uc(nvidia_pte_t *page_ptr, NvU32 num_pages)
{
#if defined(NV_SET_MEMORY_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    unsigned long addr = (unsigned long)page_address(page);
    set_memory_uc(addr, num_pages);
#elif defined(NV_SET_PAGES_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    set_pages_uc(page, num_pages);
#endif
}
|
||||
|
||||
/*
 * Restore a physically contiguous run of pages to write-back caching.
 * The *_UC_PRESENT conftest macros gate both directions: presence of the
 * UC variant implies the matching WB variant exists.
 */
static inline void nv_set_contig_memory_wb(nvidia_pte_t *page_ptr, NvU32 num_pages)
{
#if defined(NV_SET_MEMORY_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    unsigned long addr = (unsigned long)page_address(page);
    set_memory_wb(addr, num_pages);
#elif defined(NV_SET_PAGES_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    set_pages_wb(page, num_pages);
#endif
}
|
||||
|
||||
/*
 * Report whether this kernel's set_memory_array_{uc,wb}() API can handle
 * the requested cache type. Always 0 when the API is absent.
 */
static inline int nv_set_memory_array_type_present(NvU32 type)
{
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
    return (type == NV_MEMORY_UNCACHED) || (type == NV_MEMORY_WRITEBACK);
#else
    (void)type;
    return 0;
#endif
}
|
||||
|
||||
/*
 * Report whether this kernel's set_pages_array_{uc,wb}() API can handle
 * the requested cache type. Always 0 when the API is absent.
 */
static inline int nv_set_pages_array_type_present(NvU32 type)
{
#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
    return (type == NV_MEMORY_UNCACHED) || (type == NV_MEMORY_WRITEBACK);
#else
    (void)type;
    return 0;
#endif
}
|
||||
|
||||
/*
 * Apply the requested cache type to an array of kernel virtual addresses
 * via set_memory_array_{uc,wb}(). Logs an error for unsupported types;
 * callers should have checked nv_set_memory_array_type_present() first.
 */
static inline void nv_set_memory_array_type(
    unsigned long *pages,
    NvU32 num_pages,
    NvU32 type
)
{
    switch (type)
    {
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
        case NV_MEMORY_UNCACHED:
            set_memory_array_uc(pages, num_pages);
            break;
        case NV_MEMORY_WRITEBACK:
            set_memory_array_wb(pages, num_pages);
            break;
#endif
        default:
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %s(): type %d unimplemented\n",
                __FUNCTION__, type);
            break;
    }
}
|
||||
|
||||
/*
 * Apply the requested cache type to an array of struct page pointers via
 * set_pages_array_{uc,wb}(). Logs an error for unsupported types; callers
 * should have checked nv_set_pages_array_type_present() first.
 */
static inline void nv_set_pages_array_type(
    struct page **pages,
    NvU32 num_pages,
    NvU32 type
)
{
    switch (type)
    {
#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
        case NV_MEMORY_UNCACHED:
            set_pages_array_uc(pages, num_pages);
            break;
        case NV_MEMORY_WRITEBACK:
            set_pages_array_wb(pages, num_pages);
            break;
#endif
        default:
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %s(): type %d unimplemented\n",
                __FUNCTION__, type);
            break;
    }
}
|
||||
|
||||
/*
 * Dispatch a cache-type change for a contiguous page run to the matching
 * UC/WB helper. Logs an error for any other type.
 */
static inline void nv_set_contig_memory_type(
    nvidia_pte_t *page_ptr,
    NvU32 num_pages,
    NvU32 type
)
{
    switch (type)
    {
        case NV_MEMORY_UNCACHED:
            nv_set_contig_memory_uc(page_ptr, num_pages);
            break;
        case NV_MEMORY_WRITEBACK:
            nv_set_contig_memory_wb(page_ptr, num_pages);
            break;
        default:
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %s(): type %d unimplemented\n",
                __FUNCTION__, type);
    }
}
|
||||
|
||||
/*
 * Change the cache type of every page in a (possibly non-contiguous)
 * allocation. Prefers the batched array APIs when the kernel provides
 * them; falls back to per-page updates otherwise (or if the temporary
 * array allocation fails).
 */
static inline void nv_set_memory_type(nv_alloc_t *at, NvU32 type)
{
    NvU32 i;
    NV_STATUS status = NV_OK;
    /* The element type of the scratch array depends on which batched
     * kernel API (if any) is available. */
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
    unsigned long *pages = NULL;
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
    struct page **pages = NULL;
#else
    unsigned long *pages = NULL;
#endif

    nvidia_pte_t *page_ptr;
    struct page *page;

    if (nv_set_memory_array_type_present(type))
    {
        status = os_alloc_mem((void **)&pages,
                at->num_pages * sizeof(unsigned long));

    }
    else if (nv_set_pages_array_type_present(type))
    {
        status = os_alloc_mem((void **)&pages,
                at->num_pages * sizeof(struct page*));
    }

    /* Scratch allocation failure just forces the slow per-page path. */
    if (status != NV_OK)
        pages = NULL;

    //
    // If the set_{memory,page}_array_* functions are in the kernel interface,
    // it's faster to use them since they work on non-contiguous memory,
    // whereas the set_{memory,page}_* functions do not.
    //
    if (pages)
    {
        for (i = 0; i < at->num_pages; i++)
        {
            page_ptr = at->page_table[i];
            page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
            pages[i] = (unsigned long)page_address(page);
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
            pages[i] = page;
#endif
        }
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
        nv_set_memory_array_type(pages, at->num_pages, type);
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
        nv_set_pages_array_type(pages, at->num_pages, type);
#endif
        os_free_mem(pages);
    }

    //
    // If the set_{memory,page}_array_* functions aren't present in the kernel
    // interface, each page has to be set individually, which has been measured
    // to be ~10x slower than using the set_{memory,page}_array_* functions.
    //
    else
    {
        for (i = 0; i < at->num_pages; i++)
            nv_set_contig_memory_type(at->page_table[i], 1, type);
    }
}
|
||||
|
||||
static NvU64 nv_get_max_sysmem_address(void)
|
||||
{
|
||||
NvU64 global_max_pfn = 0ULL;
|
||||
int node_id;
|
||||
|
||||
for_each_online_node(node_id)
|
||||
{
|
||||
global_max_pfn = max(global_max_pfn, node_end_pfn(node_id));
|
||||
}
|
||||
|
||||
return ((global_max_pfn + 1) << PAGE_SHIFT) - 1;
|
||||
}
|
||||
|
||||
/*
 * Build the GFP flags for a system-memory allocation, taking into account
 * the device's DMA addressability (possibly restricting to the 32-bit
 * zone), zeroing/NUMA-node requests in at->flags, and the allocation
 * order.
 */
static unsigned int nv_compute_gfp_mask(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    unsigned int gfp_mask = NV_GFP_KERNEL;
    struct device *dev = at->dev;

    /*
     * If we know that SWIOTLB is enabled (and therefore we avoid calling the
     * kernel to DMA-remap the pages), or if we are using dma_direct (which may
     * transparently use the SWIOTLB for pages that are unaddressable by the
     * device, in kernel versions 5.0 and later), limit our allocation pool
     * to the first 4GB to avoid allocating pages outside of our device's
     * addressable limit.
     * Also, limit the allocation to the first 4GB if explicitly requested by
     * setting the "nv->force_dma32_alloc" variable.
     */
    if (!nv || !nv_requires_dma_remap(nv) || nv_is_dma_direct(dev) || nv->force_dma32_alloc)
    {
        NvU64 max_sysmem_address = nv_get_max_sysmem_address();
        if ((dev && dev->dma_mask && (*(dev->dma_mask) < max_sysmem_address)) ||
            (nv && nv->force_dma32_alloc))
        {
            gfp_mask = NV_GFP_DMA32;
        }
    }
    /* Prefer the newer "may fail after retrying" semantic when available. */
#if defined(__GFP_RETRY_MAYFAIL)
    gfp_mask |= __GFP_RETRY_MAYFAIL;
#elif defined(__GFP_NORETRY)
    gfp_mask |= __GFP_NORETRY;
#endif
#if defined(__GFP_ZERO)
    if (at->flags.zeroed)
        gfp_mask |= __GFP_ZERO;
#endif
#if defined(__GFP_THISNODE)
    if (at->flags.node0)
        gfp_mask |= __GFP_THISNODE;
#endif
    // Compound pages are required by vm_insert_page for high-order page
    // allocations
    if (at->order > 0)
        gfp_mask |= __GFP_COMP;

    return gfp_mask;
}
|
||||
|
||||
/*
|
||||
* This function is needed for allocating contiguous physical memory in xen
|
||||
* dom0. Because of the use of xen sw iotlb in xen dom0, memory allocated by
|
||||
* NV_GET_FREE_PAGES may not be machine contiguous when size is more than
|
||||
* 1 page. nv_alloc_coherent_pages() will give us machine contiguous memory.
|
||||
* Even though we get dma_address directly in this function, we will
|
||||
* still call pci_map_page() later to get dma address. This is fine as it
|
||||
* will return the same machine address.
|
||||
*/
|
||||
/*
 * Allocate at->num_pages of machine-contiguous memory with
 * dma_alloc_coherent() and populate the allocation's page table with the
 * per-page virtual, physical, and DMA addresses. Marks the allocation
 * coherent so the matching free path is used.
 *
 * Returns NV_OK or NV_ERR_NO_MEMORY.
 */
static NV_STATUS nv_alloc_coherent_pages(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    NvU32 i;
    unsigned int gfp_mask;
    unsigned long virt_addr = 0;
    dma_addr_t bus_addr;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct device *dev = nvl->dev;

    gfp_mask = nv_compute_gfp_mask(nv, at);

    /* One contiguous coherent buffer backs the entire allocation. */
    virt_addr = (unsigned long)dma_alloc_coherent(dev,
                                                  at->num_pages * PAGE_SIZE,
                                                  &bus_addr,
                                                  gfp_mask);
    if (!virt_addr)
    {
        nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
        return NV_ERR_NO_MEMORY;
    }

    /* Derive each page's addresses from the single contiguous region. */
    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        page_ptr->virt_addr = virt_addr + i * PAGE_SIZE;
        page_ptr->phys_addr = virt_to_phys((void *)page_ptr->virt_addr);
        page_ptr->dma_addr = bus_addr + i * PAGE_SIZE;
    }

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_UNCACHED);
    }

    /* Record that this allocation must be freed via
     * nv_free_coherent_pages(). */
    at->flags.coherent = NV_TRUE;
    return NV_OK;
}
|
||||
|
||||
/*
 * Free an allocation made by nv_alloc_coherent_pages(): restore write-back
 * caching if it was changed, then release the single contiguous coherent
 * buffer.
 */
static void nv_free_coherent_pages(
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    struct device *dev = at->dev;

    /* page_table[0] holds the base address of the contiguous region. */
    page_ptr = at->page_table[0];

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_WRITEBACK);
    }

    dma_free_coherent(dev, at->num_pages * PAGE_SIZE,
                      (void *)page_ptr->virt_addr, page_ptr->dma_addr);
}
|
||||
|
||||
/*
 * Allocate at->num_pages of physically contiguous system memory via the
 * page allocator and fill in the per-page address bookkeeping. Falls back
 * to coherent (DMA API) allocation on Xen dom0, for unencrypted requests,
 * and on vGPU hypervisors when the page allocator fails.
 *
 * Returns NV_OK, NV_ERR_NO_MEMORY, or NV_ERR_OPERATING_SYSTEM.
 */
NV_STATUS nv_alloc_contig_pages(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    NV_STATUS status;
    nvidia_pte_t *page_ptr;
    NvU32 i, j;
    unsigned int gfp_mask;
    unsigned long virt_addr = 0;
    NvU64 phys_addr;
    struct device *dev = at->dev;

    nv_printf(NV_DBG_MEMINFO,
        "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    // TODO: This is a temporary WAR, and will be removed after fixing bug 200732409.
    if (os_is_xen_dom0() || at->flags.unencrypted)
        return nv_alloc_coherent_pages(nv, at);

    at->order = get_order(at->num_pages * PAGE_SIZE);
    gfp_mask = nv_compute_gfp_mask(nv, at);

    if (at->flags.node0)
    {
        NV_ALLOC_PAGES_NODE(virt_addr, 0, at->order, gfp_mask);
    }
    else
    {
        NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask);
    }
    if (virt_addr == 0)
    {
        /* On vGPU hypervisors, retry via the DMA coherent allocator
         * before giving up. */
        if (os_is_vgx_hyper())
        {
            nv_printf(NV_DBG_MEMINFO,
                "NVRM: VM: %s: failed to allocate memory, trying coherent memory \n", __FUNCTION__);

            status = nv_alloc_coherent_pages(nv, at);
            return status;
        }

        nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
        return NV_ERR_NO_MEMORY;
    }
    /* Without __GFP_ZERO support, zero the region by hand if requested. */
#if !defined(__GFP_ZERO)
    if (at->flags.zeroed)
        memset((void *)virt_addr, 0, (at->num_pages * PAGE_SIZE));
#endif

    for (i = 0; i < at->num_pages; i++, virt_addr += PAGE_SIZE)
    {
        phys_addr = nv_get_kern_phys_address(virt_addr);
        if (phys_addr == 0)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: %s: failed to look up physical address\n",
                __FUNCTION__);
            status = NV_ERR_OPERATING_SYSTEM;
            goto failed;
        }

        page_ptr = at->page_table[i];
        page_ptr->phys_addr = phys_addr;
        /* Snapshot the refcount so the free path can detect leaked
         * external references. */
        page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
        page_ptr->virt_addr = virt_addr;
        page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);

        NV_MAYBE_RESERVE_PAGE(page_ptr);
    }

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_UNCACHED);
    }

    at->flags.coherent = NV_FALSE;

    return NV_OK;

failed:
    if (i > 0)
    {
        for (j = 0; j < i; j++)
            NV_MAYBE_UNRESERVE_PAGE(at->page_table[j]);
    }

    /* NOTE(review): this frees via page_table[0]->virt_addr, which is only
     * assigned once the first loop iteration succeeds — presumably the
     * page table is zero-initialized and a failure at i == 0 is benign;
     * confirm against the page_table allocation path. */
    page_ptr = at->page_table[0];
    NV_FREE_PAGES(page_ptr->virt_addr, at->order);

    return status;
}
|
||||
|
||||
/*
 * Free a contiguous allocation made by nv_alloc_contig_pages(). Delegates
 * to the coherent free path when the allocation fell back to
 * dma_alloc_coherent(). Warns (rate-limited) if any page's refcount
 * differs from the value recorded at allocation time.
 */
void nv_free_contig_pages(
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    unsigned int i;

    nv_printf(NV_DBG_MEMINFO,
        "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    if (at->flags.coherent)
        return nv_free_coherent_pages(at);

    /* Restore write-back caching before returning pages to the kernel. */
    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_WRITEBACK);
    }

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count)
        {
            /* Rate-limit this diagnostic across the driver's lifetime. */
            static int count = 0;
            if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: VM: %s: page count != initial page count (%u,%u)\n",
                    __FUNCTION__, NV_GET_PAGE_COUNT(page_ptr),
                    page_ptr->page_count);
            }
        }
        NV_MAYBE_UNRESERVE_PAGE(page_ptr);
    }

    /* The whole run is freed in one call via its base page. */
    page_ptr = at->page_table[0];

    NV_FREE_PAGES(page_ptr->virt_addr, at->order);
}
|
||||
|
||||
NV_STATUS nv_alloc_system_pages(
|
||||
nv_state_t *nv,
|
||||
nv_alloc_t *at
|
||||
)
|
||||
{
|
||||
NV_STATUS status;
|
||||
nvidia_pte_t *page_ptr;
|
||||
NvU32 i, j;
|
||||
unsigned int gfp_mask;
|
||||
unsigned long virt_addr = 0;
|
||||
NvU64 phys_addr;
|
||||
struct device *dev = at->dev;
|
||||
dma_addr_t bus_addr;
|
||||
|
||||
nv_printf(NV_DBG_MEMINFO,
|
||||
"NVRM: VM: %u: %u pages\n", __FUNCTION__, at->num_pages);
|
||||
|
||||
gfp_mask = nv_compute_gfp_mask(nv, at);
|
||||
|
||||
for (i = 0; i < at->num_pages; i++)
|
||||
{
|
||||
if (at->flags.unencrypted && (dev != NULL))
|
||||
{
|
||||
virt_addr = (unsigned long)dma_alloc_coherent(dev,
|
||||
PAGE_SIZE,
|
||||
&bus_addr,
|
||||
gfp_mask);
|
||||
at->flags.coherent = NV_TRUE;
|
||||
}
|
||||
else if (at->flags.node0)
|
||||
{
|
||||
NV_ALLOC_PAGES_NODE(virt_addr, 0, 0, gfp_mask);
|
||||
}
|
||||
else
|
||||
{
|
||||
NV_GET_FREE_PAGES(virt_addr, 0, gfp_mask);
|
||||
}
|
||||
|
||||
if (virt_addr == 0)
|
||||
{
|
||||
nv_printf(NV_DBG_MEMINFO,
|
||||
"NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
|
||||
status = NV_ERR_NO_MEMORY;
|
||||
goto failed;
|
||||
}
|
||||
#if !defined(__GFP_ZERO)
|
||||
if (at->flags.zeroed)
|
||||
memset((void *)virt_addr, 0, PAGE_SIZE);
|
||||
#endif
|
||||
|
||||
phys_addr = nv_get_kern_phys_address(virt_addr);
|
||||
if (phys_addr == 0)
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: VM: %s: failed to look up physical address\n",
|
||||
__FUNCTION__);
|
||||
NV_FREE_PAGES(virt_addr, 0);
|
||||
status = NV_ERR_OPERATING_SYSTEM;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
#if defined(_PAGE_NX)
|
||||
if (((_PAGE_NX & pgprot_val(PAGE_KERNEL)) != 0) &&
|
||||
(phys_addr < 0x400000))
|
||||
{
|
||||
nv_printf(NV_DBG_SETUP,
|
||||
"NVRM: VM: %s: discarding page @ 0x%llx\n",
|
||||
__FUNCTION__, phys_addr);
|
||||
--i;
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
|
||||
page_ptr = at->page_table[i];
|
||||
page_ptr->phys_addr = phys_addr;
|
||||
page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
|
||||
page_ptr->virt_addr = virt_addr;
|
||||
|
||||
//
|
||||
// Use unencrypted dma_addr returned by dma_alloc_coherent() as
|
||||
// nv_phys_to_dma() returns encrypted dma_addr when AMD SEV is enabled.
|
||||
//
|
||||
if (at->flags.coherent)
|
||||
page_ptr->dma_addr = bus_addr;
|
||||
else if (dev)
|
||||
page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);
|
||||
else
|
||||
page_ptr->dma_addr = page_ptr->phys_addr;
|
||||
|
||||
NV_MAYBE_RESERVE_PAGE(page_ptr);
|
||||
}
|
||||
|
||||
if (at->cache_type != NV_MEMORY_CACHED)
|
||||
nv_set_memory_type(at, NV_MEMORY_UNCACHED);
|
||||
|
||||
return NV_OK;
|
||||
|
||||
failed:
|
||||
if (i > 0)
|
||||
{
|
||||
for (j = 0; j < i; j++)
|
||||
{
|
||||
page_ptr = at->page_table[j];
|
||||
NV_MAYBE_UNRESERVE_PAGE(page_ptr);
|
||||
if (at->flags.coherent)
|
||||
{
|
||||
dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr,
|
||||
page_ptr->dma_addr);
|
||||
}
|
||||
else
|
||||
{
|
||||
NV_FREE_PAGES(page_ptr->virt_addr, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/*
 * Free an allocation made by nv_alloc_system_pages(), page by page, via
 * the same path it was allocated with (dma_free_coherent() vs.
 * NV_FREE_PAGES). Warns (rate-limited) if any page's refcount differs
 * from the value recorded at allocation time.
 */
void nv_free_system_pages(
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    unsigned int i;
    struct device *dev = at->dev;

    nv_printf(NV_DBG_MEMINFO,
        "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    /* Restore write-back caching before returning pages to the kernel. */
    if (at->cache_type != NV_MEMORY_CACHED)
        nv_set_memory_type(at, NV_MEMORY_WRITEBACK);

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count)
        {
            /* Rate-limit this diagnostic across the driver's lifetime. */
            static int count = 0;
            if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: VM: %s: page count != initial page count (%u,%u)\n",
                    __FUNCTION__, NV_GET_PAGE_COUNT(page_ptr),
                    page_ptr->page_count);
            }
        }

        NV_MAYBE_UNRESERVE_PAGE(page_ptr);
        if (at->flags.coherent)
        {
            dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr,
                              page_ptr->dma_addr);
        }
        else
        {
            NV_FREE_PAGES(page_ptr->virt_addr, 0);
        }
    }
}
|
||||
|
||||
NvUPtr nv_vm_map_pages(
|
||||
struct page **pages,
|
||||
NvU32 count,
|
||||
NvBool cached,
|
||||
NvBool unencrypted
|
||||
)
|
||||
{
|
||||
NvUPtr virt_addr = 0;
|
||||
|
||||
if (!NV_MAY_SLEEP())
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: %s: can't map %d pages, invalid context!\n",
|
||||
__FUNCTION__, count);
|
||||
os_dbg_breakpoint();
|
||||
return virt_addr;
|
||||
}
|
||||
|
||||
virt_addr = nv_vmap(pages, count, cached, unencrypted);
|
||||
return virt_addr;
|
||||
}
|
||||
|
||||
void nv_vm_unmap_pages(
|
||||
NvUPtr virt_addr,
|
||||
NvU32 count
|
||||
)
|
||||
{
|
||||
if (!NV_MAY_SLEEP())
|
||||
{
|
||||
nv_printf(NV_DBG_ERRORS,
|
||||
"NVRM: %s: can't unmap %d pages at 0x%0llx, "
|
||||
"invalid context!\n", __FUNCTION__, count, virt_addr);
|
||||
os_dbg_breakpoint();
|
||||
return;
|
||||
}
|
||||
|
||||
nv_vunmap(virt_addr, count);
|
||||
}
|
||||
|
||||
void nv_address_space_init_once(struct address_space *mapping)
|
||||
{
|
||||
#if defined(NV_ADDRESS_SPACE_INIT_ONCE_PRESENT)
|
||||
address_space_init_once(mapping);
|
||||
#else
|
||||
memset(mapping, 0, sizeof(*mapping));
|
||||
INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
|
||||
|
||||
#if defined(NV_ADDRESS_SPACE_HAS_RWLOCK_TREE_LOCK)
|
||||
//
|
||||
// The .tree_lock member variable was changed from type rwlock_t, to
|
||||
// spinlock_t, on 25 July 2008, by mainline commit
|
||||
// 19fd6231279be3c3bdd02ed99f9b0eb195978064.
|
||||
//
|
||||
rwlock_init(&mapping->tree_lock);
|
||||
#else
|
||||
spin_lock_init(&mapping->tree_lock);
|
||||
#endif
|
||||
|
||||
spin_lock_init(&mapping->i_mmap_lock);
|
||||
INIT_LIST_HEAD(&mapping->private_list);
|
||||
spin_lock_init(&mapping->private_lock);
|
||||
INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
|
||||
INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
|
||||
#endif /* !NV_ADDRESS_SPACE_INIT_ONCE_PRESENT */
|
||||
}
|
||||
39
kernel-open/nvidia/nv-vtophys.c
Normal file
39
kernel-open/nvidia/nv-vtophys.c
Normal file
@@ -0,0 +1,39 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
|
||||
/*
 * Translate a direct-mapped kernel virtual address to its physical
 * address. Returns 0 (and logs an error) if the address is not a valid
 * direct-mapped kernel address.
 */
NvU64 NV_API_CALL nv_get_kern_phys_address(NvU64 address)
{
    if (!virt_addr_valid(address))
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: can't translate address in %s()!\n", __FUNCTION__);
        return 0;
    }

    /* direct-mapped kernel address */
    return __pa(address);
}
|
||||
|
||||
5628
kernel-open/nvidia/nv.c
Normal file
5628
kernel-open/nvidia/nv.c
Normal file
File diff suppressed because it is too large
Load Diff
301
kernel-open/nvidia/nv_gpu_ops.h
Normal file
301
kernel-open/nvidia/nv_gpu_ops.h
Normal file
@@ -0,0 +1,301 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* nv_gpu_ops.h
|
||||
*
|
||||
* This file defines the interface between the common RM layer
|
||||
* and the OS specific platform layers. (Currently supported
|
||||
* are Linux and KMD)
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _NV_GPU_OPS_H_
|
||||
#define _NV_GPU_OPS_H_
|
||||
#include "nvgputypes.h"
|
||||
#include "nv_uvm_types.h"
|
||||
|
||||
typedef struct gpuSession *gpuSessionHandle;
|
||||
typedef struct gpuDevice *gpuDeviceHandle;
|
||||
typedef struct gpuAddressSpace *gpuAddressSpaceHandle;
|
||||
typedef struct gpuChannel *gpuChannelHandle;
|
||||
typedef struct gpuObject *gpuObjectHandle;
|
||||
|
||||
typedef struct gpuRetainedChannel_struct gpuRetainedChannel;
|
||||
|
||||
NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session);
|
||||
|
||||
NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session);
|
||||
|
||||
NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session,
|
||||
const gpuInfo *pGpuInfo,
|
||||
const NvProcessorUuid *gpuGuid,
|
||||
struct gpuDevice **device,
|
||||
NvBool bCreateSmcPartition);
|
||||
|
||||
NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device);
|
||||
|
||||
NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device,
|
||||
NvU64 vaBase,
|
||||
NvU64 vaSize,
|
||||
gpuAddressSpaceHandle *vaSpace,
|
||||
UvmGpuAddressSpaceInfo *vaSpaceInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsGetP2PCaps(gpuDeviceHandle device1,
|
||||
gpuDeviceHandle device2,
|
||||
getP2PCapsParams *p2pCaps);
|
||||
|
||||
void nvGpuOpsAddressSpaceDestroy(gpuAddressSpaceHandle vaSpace);
|
||||
|
||||
NV_STATUS nvGpuOpsMemoryAllocFb (gpuAddressSpaceHandle vaSpace,
|
||||
NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace,
|
||||
NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsPmaAllocPages(void *pPma,
|
||||
NvLength pageCount,
|
||||
NvU32 pageSize,
|
||||
gpuPmaAllocationOptions *pPmaAllocOptions,
|
||||
NvU64 *pPages);
|
||||
|
||||
void nvGpuOpsPmaFreePages(void *pPma,
|
||||
NvU64 *pPages,
|
||||
NvLength pageCount,
|
||||
NvU32 pageSize,
|
||||
NvU32 flags);
|
||||
|
||||
NV_STATUS nvGpuOpsPmaPinPages(void *pPma,
|
||||
NvU64 *pPages,
|
||||
NvLength pageCount,
|
||||
NvU32 pageSize,
|
||||
NvU32 flags);
|
||||
|
||||
NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma,
|
||||
NvU64 *pPages,
|
||||
NvLength pageCount,
|
||||
NvU32 pageSize);
|
||||
|
||||
NV_STATUS nvGpuOpsChannelAllocate(gpuAddressSpaceHandle vaSpace,
|
||||
const gpuChannelAllocParams *params,
|
||||
gpuChannelHandle *channelHandle,
|
||||
gpuChannelInfo *channelInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace,
|
||||
NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset);
|
||||
|
||||
void nvGpuOpsChannelDestroy(struct gpuChannel *channel);
|
||||
|
||||
void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace,
|
||||
NvU64 pointer);
|
||||
|
||||
NV_STATUS nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace,
|
||||
NvU64 memory, NvLength length,
|
||||
void **cpuPtr, NvU32 pageSize);
|
||||
|
||||
void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace,
|
||||
void* cpuPtr);
|
||||
|
||||
NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device,
|
||||
gpuCaps *caps);
|
||||
|
||||
NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device,
|
||||
gpuCesCaps *caps);
|
||||
|
||||
NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace,
|
||||
NvU64 srcAddress,
|
||||
struct gpuAddressSpace *dstVaSpace,
|
||||
NvU64 *dstAddress);
|
||||
|
||||
NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device,
|
||||
NvHandle hClient,
|
||||
NvHandle hPhysMemory,
|
||||
NvHandle *hDupMemory,
|
||||
gpuMemoryInfo *pGpuMemoryInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice,
|
||||
NvHandle hSubDevice, NvU8 *gpuGuid,
|
||||
unsigned guidLength);
|
||||
|
||||
NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid,
|
||||
const NvU8 *gpuUuid,
|
||||
NvHandle *hClient,
|
||||
NvHandle *hDevice,
|
||||
NvHandle *hSubDevice);
|
||||
|
||||
NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device,
|
||||
NvHandle hPhysHandle);
|
||||
|
||||
NV_STATUS nvGpuOpsGetAttachedGpus(NvU8 *guidList, unsigned *numGpus);
|
||||
|
||||
NV_STATUS nvGpuOpsGetGpuInfo(const NvProcessorUuid *gpuUuid,
|
||||
const gpuClientInfo *pGpuClientInfo,
|
||||
gpuInfo *pGpuInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, unsigned uuidLength, NvU32 *pDeviceId,
|
||||
NvU32 *pSubdeviceId);
|
||||
|
||||
NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, NvBool bOwnInterrupts);
|
||||
|
||||
NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device);
|
||||
|
||||
NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEccDbeSet);
|
||||
|
||||
NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace,
|
||||
NvU64 physAddress, unsigned numEntries,
|
||||
NvBool bVidMemAperture, NvU32 pasid);
|
||||
|
||||
NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace);
|
||||
|
||||
NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace * vaSpace, void ** pFmt);
|
||||
|
||||
NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace * vaSpace);
|
||||
|
||||
NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo * fbInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo * eccInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, gpuFaultInfo *pFaultInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device,
|
||||
gpuFaultInfo *pFaultInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, NvBool *hasPendingFaults);
|
||||
|
||||
NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, void *faultBuffer, NvU32 *numFaults);
|
||||
|
||||
NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device,
|
||||
NvHandle hUserClient,
|
||||
NvHandle hUserVASpace,
|
||||
struct gpuAddressSpace **vaSpace,
|
||||
UvmGpuAddressSpaceInfo *vaSpaceInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device,
|
||||
void **pPma,
|
||||
const UvmPmaStatistics **pPmaPubStats);
|
||||
|
||||
NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device,
|
||||
gpuAccessCntrInfo *pAccessCntrInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsOwnAccessCntrIntr(struct gpuSession *session,
|
||||
gpuAccessCntrInfo *pAccessCntrInfo,
|
||||
NvBool bOwnInterrupts);
|
||||
|
||||
NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device,
|
||||
gpuAccessCntrInfo *pAccessCntrInfo,
|
||||
gpuAccessCntrConfig *pAccessCntrConfig);
|
||||
|
||||
NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1,
|
||||
struct gpuDevice *device2,
|
||||
NvHandle *hP2pObject);
|
||||
|
||||
NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session,
|
||||
NvHandle hP2pObject);
|
||||
|
||||
NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace,
|
||||
NvHandle hDupedMemory,
|
||||
NvU64 offset,
|
||||
NvU64 size,
|
||||
gpuExternalMappingInfo *pGpuExternalMappingInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace,
|
||||
NvHandle hClient,
|
||||
NvHandle hChannel,
|
||||
gpuRetainedChannel **retainedChannel,
|
||||
gpuChannelInstanceInfo *channelInstanceInfo);
|
||||
|
||||
void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel);
|
||||
|
||||
NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel,
|
||||
gpuChannelResourceBindParams *channelResourceBindParams);
|
||||
|
||||
void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, NvBool bImmediate);
|
||||
|
||||
NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace,
|
||||
NvP64 resourceDescriptor,
|
||||
NvU64 offset,
|
||||
NvU64 size,
|
||||
gpuExternalMappingInfo *pGpuExternalMappingInfo);
|
||||
|
||||
NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device,
|
||||
const void *pFaultPacket);
|
||||
|
||||
// Private interface used for windows only
|
||||
|
||||
#if defined(NV_WINDOWS)
|
||||
NV_STATUS nvGpuOpsGetRmHandleForSession(gpuSessionHandle hSession, NvHandle *hRmClient);
|
||||
|
||||
NV_STATUS nvGpuOpsGetRmHandleForChannel(gpuChannelHandle hChannel, NvHandle *hRmChannel);
|
||||
#endif // WINDOWS
|
||||
|
||||
// Interface used for SR-IOV heavy
|
||||
|
||||
NV_STATUS nvGpuOpsPagingChannelAllocate(struct gpuDevice *device,
|
||||
const gpuPagingChannelAllocParams *params,
|
||||
gpuPagingChannelHandle *channelHandle,
|
||||
gpuPagingChannelInfo *channelinfo);
|
||||
|
||||
void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel);
|
||||
|
||||
NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace,
|
||||
NvU64 srcAddress,
|
||||
struct gpuDevice *device,
|
||||
NvU64 *dstAddress);
|
||||
|
||||
void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace,
|
||||
NvU64 srcAddress,
|
||||
struct gpuDevice *device);
|
||||
|
||||
NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,
|
||||
char *methodStream,
|
||||
NvU32 methodStreamSize);
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#endif /* _NV_GPU_OPS_H_*/
|
||||
1544
kernel-open/nvidia/nv_uvm_interface.c
Normal file
1544
kernel-open/nvidia/nv_uvm_interface.c
Normal file
File diff suppressed because it is too large
Load Diff
39
kernel-open/nvidia/nvidia-sources.Kbuild
Normal file
39
kernel-open/nvidia/nvidia-sources.Kbuild
Normal file
@@ -0,0 +1,39 @@
|
||||
NVIDIA_SOURCES ?=
|
||||
NVIDIA_SOURCES_CXX ?=
|
||||
|
||||
NVIDIA_SOURCES += nvidia/nv.c
|
||||
NVIDIA_SOURCES += nvidia/nv-pci.c
|
||||
NVIDIA_SOURCES += nvidia/nv-dmabuf.c
|
||||
NVIDIA_SOURCES += nvidia/nv-acpi.c
|
||||
NVIDIA_SOURCES += nvidia/nv-cray.c
|
||||
NVIDIA_SOURCES += nvidia/nv-dma.c
|
||||
NVIDIA_SOURCES += nvidia/nv-i2c.c
|
||||
NVIDIA_SOURCES += nvidia/nv-mmap.c
|
||||
NVIDIA_SOURCES += nvidia/nv-p2p.c
|
||||
NVIDIA_SOURCES += nvidia/nv-pat.c
|
||||
NVIDIA_SOURCES += nvidia/nv-procfs.c
|
||||
NVIDIA_SOURCES += nvidia/nv-procfs-utils.c
|
||||
NVIDIA_SOURCES += nvidia/nv-usermap.c
|
||||
NVIDIA_SOURCES += nvidia/nv-vm.c
|
||||
NVIDIA_SOURCES += nvidia/nv-vtophys.c
|
||||
NVIDIA_SOURCES += nvidia/os-interface.c
|
||||
NVIDIA_SOURCES += nvidia/os-mlock.c
|
||||
NVIDIA_SOURCES += nvidia/os-pci.c
|
||||
NVIDIA_SOURCES += nvidia/os-registry.c
|
||||
NVIDIA_SOURCES += nvidia/os-usermap.c
|
||||
NVIDIA_SOURCES += nvidia/nv-modeset-interface.c
|
||||
NVIDIA_SOURCES += nvidia/nv-pci-table.c
|
||||
NVIDIA_SOURCES += nvidia/nv-kthread-q.c
|
||||
NVIDIA_SOURCES += nvidia/nv-memdbg.c
|
||||
NVIDIA_SOURCES += nvidia/nv-ibmnpu.c
|
||||
NVIDIA_SOURCES += nvidia/nv-report-err.c
|
||||
NVIDIA_SOURCES += nvidia/nv-rsync.c
|
||||
NVIDIA_SOURCES += nvidia/nv-msi.c
|
||||
NVIDIA_SOURCES += nvidia/nv-caps.c
|
||||
NVIDIA_SOURCES += nvidia/nv-frontend.c
|
||||
NVIDIA_SOURCES += nvidia/nv_uvm_interface.c
|
||||
NVIDIA_SOURCES += nvidia/nvlink_linux.c
|
||||
NVIDIA_SOURCES += nvidia/nvlink_caps.c
|
||||
NVIDIA_SOURCES += nvidia/linux_nvswitch.c
|
||||
NVIDIA_SOURCES += nvidia/procfs_nvswitch.c
|
||||
NVIDIA_SOURCES += nvidia/i2c_nvswitch.c
|
||||
258
kernel-open/nvidia/nvidia.Kbuild
Normal file
258
kernel-open/nvidia/nvidia.Kbuild
Normal file
@@ -0,0 +1,258 @@
|
||||
###########################################################################
|
||||
# Kbuild fragment for nvidia.ko
|
||||
###########################################################################
|
||||
|
||||
#
|
||||
# Define NVIDIA_{SOURCES,OBJECTS}
|
||||
#
|
||||
|
||||
include $(src)/nvidia/nvidia-sources.Kbuild
|
||||
NVIDIA_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_SOURCES))
|
||||
|
||||
obj-m += nvidia.o
|
||||
nvidia-y := $(NVIDIA_OBJECTS)
|
||||
|
||||
NVIDIA_KO = nvidia/nvidia.ko
|
||||
|
||||
|
||||
#
|
||||
# nv-kernel.o_binary is the core binary component of nvidia.ko, shared
|
||||
# across all UNIX platforms. Create a symlink, "nv-kernel.o" that
|
||||
# points to nv-kernel.o_binary, and add nv-kernel.o to the list of
|
||||
# objects to link into nvidia.ko.
|
||||
#
|
||||
# Note that:
|
||||
# - The kbuild "clean" rule will delete all objects in nvidia-y (which
|
||||
# is why we use a symlink instead of just adding nv-kernel.o_binary
|
||||
# to nvidia-y).
|
||||
# - kbuild normally uses the naming convention of ".o_shipped" for
|
||||
# binary files. That is not used here, because the kbuild rule to
|
||||
# create the "normal" object file from ".o_shipped" does a copy, not
|
||||
# a symlink. This file is quite large, so a symlink is preferred.
|
||||
# - The file added to nvidia-y should be relative to gmake's cwd.
|
||||
# But, the target for the symlink rule should be prepended with $(obj).
|
||||
# - The "symlink" command is called using kbuild's if_changed macro to
|
||||
# generate an .nv-kernel.o.cmd file which can be used on subsequent
|
||||
# runs to determine if the command line to create the symlink changed
|
||||
# and needs to be re-executed.
|
||||
#
|
||||
|
||||
NVIDIA_BINARY_OBJECT := $(src)/nvidia/nv-kernel.o_binary
|
||||
NVIDIA_BINARY_OBJECT_O := nvidia/nv-kernel.o
|
||||
|
||||
quiet_cmd_symlink = SYMLINK $@
|
||||
cmd_symlink = ln -sf $< $@
|
||||
|
||||
targets += $(NVIDIA_BINARY_OBJECT_O)
|
||||
|
||||
$(obj)/$(NVIDIA_BINARY_OBJECT_O): $(NVIDIA_BINARY_OBJECT) FORCE
|
||||
$(call if_changed,symlink)
|
||||
|
||||
nvidia-y += $(NVIDIA_BINARY_OBJECT_O)
|
||||
|
||||
|
||||
#
|
||||
# Define nvidia.ko-specific CFLAGS.
|
||||
#
|
||||
|
||||
NVIDIA_CFLAGS += -I$(src)/nvidia
|
||||
NVIDIA_CFLAGS += -DNVIDIA_UNDEF_LEGACY_BIT_MACROS
|
||||
|
||||
ifeq ($(NV_BUILD_TYPE),release)
|
||||
NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG
|
||||
endif
|
||||
|
||||
ifeq ($(NV_BUILD_TYPE),develop)
|
||||
NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_MEM_LOGGER
|
||||
endif
|
||||
|
||||
ifeq ($(NV_BUILD_TYPE),debug)
|
||||
NVIDIA_CFLAGS += -DDEBUG -D_DEBUG -UNDEBUG -DNV_MEM_LOGGER
|
||||
endif
|
||||
|
||||
$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_OBJECTS), $(NVIDIA_CFLAGS))
|
||||
|
||||
|
||||
#
|
||||
# nv-procfs.c requires nv-compiler.h
|
||||
#
|
||||
|
||||
NV_COMPILER_VERSION_HEADER = $(obj)/nv_compiler.h
|
||||
|
||||
$(NV_COMPILER_VERSION_HEADER):
|
||||
@echo \#define NV_COMPILER \"`$(CC) -v 2>&1 | tail -n 1`\" > $@
|
||||
|
||||
$(obj)/nvidia/nv-procfs.o: $(NV_COMPILER_VERSION_HEADER)
|
||||
|
||||
clean-files += $(NV_COMPILER_VERSION_HEADER)
|
||||
|
||||
|
||||
#
|
||||
# Build nv-interface.o from the kernel interface layer objects, suitable
|
||||
# for further processing by the top-level makefile to produce a precompiled
|
||||
# kernel interface file.
|
||||
#
|
||||
|
||||
NVIDIA_INTERFACE := nvidia/nv-interface.o
|
||||
|
||||
# Linux kernel v5.12 and later looks at "always-y", Linux kernel versions
|
||||
# before v5.6 looks at "always"; kernel versions between v5.12 and v5.6
|
||||
# look at both.
|
||||
|
||||
always += $(NVIDIA_INTERFACE)
|
||||
always-y += $(NVIDIA_INTERFACE)
|
||||
|
||||
$(obj)/$(NVIDIA_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_OBJECTS))
|
||||
$(LD) -r -o $@ $^
|
||||
|
||||
|
||||
#
|
||||
# Register the conftests needed by nvidia.ko
|
||||
#
|
||||
|
||||
NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_OBJECTS)
|
||||
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += hash__remap_4k_pfn
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_uc
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_uc
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_array_uc
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_array_uc
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_wc
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sg_alloc_table
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_get_domain_bus_and_slot
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_num_physpages
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += efi_enabled
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += proc_remove
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pm_vt_switch_required
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += xen_ioemu_inject_msi
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += phys_to_dma
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_dma_ops
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_attr_macros
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_page_attrs
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += write_cr4
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_property
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_find_node_by_phandle
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_node_to_nid
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pnv_pci_get_npu_dev
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_ibm_chip_id
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_bus_address
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_stop_and_remove_bus_device
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_remove_bus_device
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += register_cpu_notifier
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cpuhp_setup_state
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_resource
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_backlight_device_by_name
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_msix_range
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_read_has_pointer_pos_arg
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_write
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kthread_create_on_node
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_find_matching_node
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dev_is_pci
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_direct_map_resource
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_get_platform
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_bpmp_send_receive
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += flush_cache_all
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += jiffies_to_timespec
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += full_name_hash
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += hlist_for_each_entry
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_atomic_ops_to_root
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pgprot_decrypted
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_mkdec
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += iterate_fd
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sg_page_iter_page
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += unsafe_follow_pfn
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_close_on_exec
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += device_property_read_u64
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_of_platform_populate
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_dma_configure
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_count_elems_of_size
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_read_variable_u8_array
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_new_client_device
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_unregister_device
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_named_gpio
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_gpio_request_one
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_direction_input
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_direction_output
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_get_value
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_set_value
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_to_irq
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_get
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_put
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_set_bw
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_export_args
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap_atomic
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map_atomic
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_has_dynamic_attachment
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_attachment_has_peer2peer
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_set_mask_and_coherent
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_bus_get_device
|
||||
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_map_sg_attrs
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_dma_ops
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___close_fd
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_close_fd
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_unused_fd
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_unused_fd_flags
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_get_default_device
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_syncpt_unit_interface_get_byte_offset
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_syncpt_unit_interface_get_aperture
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_register_ipc_client
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_unregister_ipc_client
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_client_ipc_send_recv
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_dram_clk_to_mc_clk
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_dram_num_channels
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dram_types
|
||||
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pxm_to_node
|
||||
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += kuid_t
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += noncoherent_swiotlb_dma_ops
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_insert_pfn_prot
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += vmf_insert_pfn_prot
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += address_space_init_once
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += vmbus_channel_has_ringbuffer_page
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += device_driver_of_match_table
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += device_of_node
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += node_states_n_memory
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += kmem_cache_has_kobj_remove_work
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += vmalloc_has_pgprot_t_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_channel_state
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_dev_has_ats_enabled
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += mt_device_gre
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += remove_memory_has_nid_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += add_memory_driver_managed_has_mhp_flags_arg
|
||||
|
||||
NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present
|
||||
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build
|
||||
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_build
|
||||
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_csp_build
|
||||
NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages
|
||||
NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages_remote
|
||||
NV_CONFTEST_GENERIC_COMPILE_TESTS += pm_runtime_available
|
||||
NV_CONFTEST_GENERIC_COMPILE_TESTS += vm_fault_t
|
||||
NV_CONFTEST_GENERIC_COMPILE_TESTS += pci_class_multimedia_hd_audio
|
||||
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
|
||||
122
kernel-open/nvidia/nvlink_caps.c
Normal file
122
kernel-open/nvidia/nvlink_caps.c
Normal file
@@ -0,0 +1,122 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nvlink_os.h"
|
||||
#include "nvlink_linux.h"
|
||||
#include "nvlink_caps.h"
|
||||
#include "nv-caps.h"
|
||||
|
||||
#define NVLINK_CAP_FABRIC_MGMT "fabric-mgmt"
|
||||
|
||||
typedef struct
|
||||
{
|
||||
nv_cap_t *root;
|
||||
nv_cap_t *fabric_mgmt;
|
||||
} nvlink_caps_t;
|
||||
|
||||
static nvlink_caps_t nvlink_caps = {0};
|
||||
|
||||
/*
 * Validate a user-supplied capability fd of the given type and return a
 * duplicated fd on success, or -1 on any failure.
 *
 * NVLINK_CAP_FABRIC_MANAGEMENT is the only capability currently
 * recognized.
 */
int nvlink_cap_acquire(int fd, NvU32 type)
{
    int dup_fd;

    if (fd < 0)
    {
        return -1;
    }

    if (type != NVLINK_CAP_FABRIC_MANAGEMENT)
    {
        nvlink_print(NVLINK_DBG_ERRORS, "Unknown capability specified\n");
        return -1;
    }

    dup_fd = nv_cap_validate_and_dup_fd(nvlink_caps.fabric_mgmt, fd);
    if (dup_fd < 0)
    {
        nvlink_print(NVLINK_DBG_ERRORS,
                     "Failed to validate the fabric mgmt capability\n");
        return -1;
    }

    return dup_fd;
}
|
||||
|
||||
void nvlink_cap_release(int fd)
|
||||
{
|
||||
if (fd < 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
nv_cap_close_fd(fd);
|
||||
}
|
||||
|
||||
void nvlink_cap_exit(void)
|
||||
{
|
||||
if (nvlink_caps.fabric_mgmt != NULL)
|
||||
{
|
||||
nv_cap_destroy_entry(nvlink_caps.fabric_mgmt);
|
||||
nvlink_caps.fabric_mgmt = NULL;
|
||||
}
|
||||
|
||||
if (nvlink_caps.root != NULL)
|
||||
{
|
||||
nv_cap_destroy_entry(nvlink_caps.root);
|
||||
nvlink_caps.root = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
int nvlink_cap_init(const char *path)
|
||||
{
|
||||
if (path == NULL)
|
||||
{
|
||||
nvlink_print(NVLINK_DBG_ERRORS, "Invalid path: %s\n", path);
|
||||
return -1;
|
||||
}
|
||||
|
||||
nvlink_caps.root = nv_cap_init(path);
|
||||
if (nvlink_caps.root == NULL)
|
||||
{
|
||||
nvlink_print(NVLINK_DBG_ERRORS, "Failed to initialize capabilities\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
nvlink_caps.fabric_mgmt = nv_cap_create_file_entry(nvlink_caps.root,
|
||||
NVLINK_CAP_FABRIC_MGMT,
|
||||
S_IRUSR);
|
||||
if (nvlink_caps.fabric_mgmt == NULL)
|
||||
{
|
||||
nvlink_print(NVLINK_DBG_ERRORS, "Failed to create fabric-mgmt entry\n");
|
||||
nvlink_cap_exit();
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
38
kernel-open/nvidia/nvlink_caps.h
Normal file
38
kernel-open/nvidia/nvlink_caps.h
Normal file
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NVLINK_CAPS_H_
|
||||
#define _NVLINK_CAPS_H_
|
||||
|
||||
/* List of supported capability type */
|
||||
#define NVLINK_CAP_FABRIC_MANAGEMENT 0
|
||||
|
||||
/* Max supported capabilities count */
|
||||
#define NVLINK_CAP_COUNT 1
|
||||
|
||||
int nvlink_cap_init(const char *path);
|
||||
void nvlink_cap_exit(void);
|
||||
int nvlink_cap_acquire(int fd, NvU32 type);
|
||||
void nvlink_cap_release(int fd);
|
||||
|
||||
#endif //_NVLINK_CAPS_H_
|
||||
176
kernel-open/nvidia/nvlink_common.h
Normal file
176
kernel-open/nvidia/nvlink_common.h
Normal file
@@ -0,0 +1,176 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NVLINK_COMMON_H_
|
||||
#define _NVLINK_COMMON_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "nvtypes.h"
|
||||
#include "nvCpuUuid.h"
|
||||
#include "nvlink_errors.h"
|
||||
|
||||
|
||||
#ifndef NULL
|
||||
#define NULL ((void *)0)
|
||||
#endif
|
||||
|
||||
// nvlink pci bar information
|
||||
struct nvlink_pci_bar_info
|
||||
{
|
||||
NvU64 busAddress;
|
||||
NvU64 baseAddr;
|
||||
NvU64 barSize;
|
||||
NvU32 offset;
|
||||
void *pBar;
|
||||
};
|
||||
|
||||
#define MAX_NVLINK_BARS 2
|
||||
|
||||
// nvlink pci information
|
||||
struct nvlink_pci_info
|
||||
{
|
||||
NvU32 domain;
|
||||
NvU8 bus;
|
||||
NvU8 device;
|
||||
NvU8 function;
|
||||
NvU32 pciDeviceId;
|
||||
NvU32 irq;
|
||||
NvBool intHooked;
|
||||
struct nvlink_pci_bar_info bars[MAX_NVLINK_BARS];
|
||||
};
|
||||
|
||||
// nvlink detailed device information
|
||||
struct nvlink_detailed_device_info
|
||||
{
|
||||
char *deviceName;
|
||||
NvU64 deviceType;
|
||||
NvU8 *devUuid;
|
||||
NvBool bInitialized;
|
||||
|
||||
|
||||
|
||||
void *dev_info; // Endpoint driver device info opaque
|
||||
// to core lib. Passed from end point
|
||||
// driver to core
|
||||
|
||||
struct nvlink_pci_info *pciInfo;
|
||||
};
|
||||
|
||||
// nvlink device registration parameters
|
||||
struct nvlink_device_register_params
|
||||
{
|
||||
//
|
||||
// Core lib device info opaque to endpoint driver
|
||||
// Passed from core lib to endpoint driver
|
||||
//
|
||||
void **deviceHandle;
|
||||
char *driverName;
|
||||
|
||||
struct nvlink_detailed_device_info *device_params;
|
||||
};
|
||||
|
||||
// nvlink detailed link information
|
||||
struct nvlink_detailed_link_info
|
||||
{
|
||||
void *deviceHandle; // Core library device handle passed
|
||||
// to endpoint driver during device
|
||||
// registration
|
||||
|
||||
void *link_info; // End point driver link info opaque
|
||||
// to core lib. Passed from end point
|
||||
// driver to core
|
||||
|
||||
char *linkName;
|
||||
NvU32 linkNumber;
|
||||
NvU32 version;
|
||||
NvBool bAcCoupled;
|
||||
const void *link_handlers;
|
||||
};
|
||||
|
||||
// nvlink link registration parameters
|
||||
struct nvlink_link_register_params
|
||||
{
|
||||
//
|
||||
// Core lib link info opaque to endpoint driver
|
||||
// Passed from core lib to endpoint driver
|
||||
//
|
||||
void **linkHandle;
|
||||
|
||||
struct nvlink_detailed_link_info *link_params;
|
||||
};
|
||||
|
||||
// nvlink client device handle
|
||||
struct nvlink_device_handle
|
||||
{
|
||||
NvU32 linkMask;
|
||||
struct nvlink_pci_info pciInfo;
|
||||
};
|
||||
|
||||
#define NVLINK_PCI_DEV_FMT "%04x:%02x:%02x.%x"
|
||||
#define NVLINK_PCI_DEV_FMT_ARGS(info) (info)->domain, \
|
||||
(info)->bus, \
|
||||
(info)->device, \
|
||||
(info)->function
|
||||
|
||||
// nvlink connection information
|
||||
struct nvlink_conn_info
|
||||
{
|
||||
NvU32 domain;
|
||||
NvU16 bus;
|
||||
NvU16 device;
|
||||
NvU16 function;
|
||||
NvU32 pciDeviceId;
|
||||
NvU8 devUuid[NV_UUID_LEN];
|
||||
NvU64 deviceType;
|
||||
NvU32 linkNumber;
|
||||
NvBool bConnected;
|
||||
NvU64 chipSid;
|
||||
};
|
||||
|
||||
// nvlink ioctrl params
|
||||
struct nvlink_ioctrl_params
|
||||
{
|
||||
void *osPrivate;
|
||||
NvU32 cmd;
|
||||
void *buf;
|
||||
NvU32 size;
|
||||
};
|
||||
|
||||
// Typedefs
|
||||
typedef struct nvlink_pci_bar_info nvlink_pci_bar_info;
|
||||
typedef struct nvlink_pci_info nvlink_pci_info;
|
||||
typedef struct nvlink_detailed_device_info nvlink_detailed_device_info;
|
||||
typedef struct nvlink_detailed_link_info nvlink_detailed_link_info;
|
||||
typedef struct nvlink_device_register_params nvlink_device_register_params;
|
||||
typedef struct nvlink_link_register_params nvlink_link_register_params;
|
||||
typedef struct nvlink_conn_info nvlink_conn_info;
|
||||
typedef struct nvlink_ioctrl_params nvlink_ioctrl_params;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif //_NVLINK_COMMON_H_
|
||||
47
kernel-open/nvidia/nvlink_errors.h
Normal file
47
kernel-open/nvidia/nvlink_errors.h
Normal file
@@ -0,0 +1,47 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NVLINK_ERRORS_H_
|
||||
#define _NVLINK_ERRORS_H_
|
||||
|
||||
typedef int NvlStatus;
|
||||
|
||||
#define NVL_SUCCESS (NvlStatus) 0
|
||||
#define NVL_BAD_ARGS (NvlStatus) 1
|
||||
#define NVL_NO_MEM (NvlStatus) 2
|
||||
#define NVL_NOT_FOUND (NvlStatus) 3
|
||||
#define NVL_INITIALIZATION_PARTIAL_FAILURE (NvlStatus) 4
|
||||
#define NVL_INITIALIZATION_TOTAL_FAILURE (NvlStatus) 5
|
||||
#define NVL_PCI_ERROR (NvlStatus) 6
|
||||
#define NVL_ERR_GENERIC (NvlStatus) 7
|
||||
#define NVL_ERR_INVALID_STATE (NvlStatus) 8
|
||||
#define NVL_UNBOUND_DEVICE (NvlStatus) 9
|
||||
#define NVL_MORE_PROCESSING_REQUIRED (NvlStatus)10
|
||||
#define NVL_IO_ERROR (NvlStatus)11
|
||||
#define NVL_ERR_STATE_IN_USE (NvlStatus)12
|
||||
#define NVL_ERR_NOT_SUPPORTED (NvlStatus)13
|
||||
#define NVL_ERR_NOT_IMPLEMENTED (NvlStatus)14
|
||||
#define NVL_ERR_INSUFFICIENT_PERMISSIONS (NvlStatus)15
|
||||
#define NVL_ERR_OPERATING_SYSTEM (NvlStatus)16
|
||||
|
||||
#endif // _NVLINK_ERRORS_H_
|
||||
53
kernel-open/nvidia/nvlink_export.h
Normal file
53
kernel-open/nvidia/nvlink_export.h
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NVLINK_EXPORT_H_
|
||||
#define _NVLINK_EXPORT_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "nvlink_common.h"
|
||||
|
||||
/*
|
||||
* Initializes core lib and does all that is needed
|
||||
* to access NVLINK functionality on the current platform.
|
||||
*/
|
||||
NvlStatus nvlink_lib_initialize(void);
|
||||
|
||||
/*
|
||||
* Frees any related resources and then unloads core lib.
|
||||
*/
|
||||
NvlStatus nvlink_lib_unload(void);
|
||||
|
||||
/*
|
||||
* Entry point for nvlink ioctl calls.
|
||||
*/
|
||||
NvlStatus nvlink_lib_ioctl_ctrl(nvlink_ioctrl_params *ctrl_params);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif //_NVLINK_EXPORT_H_
|
||||
643
kernel-open/nvidia/nvlink_linux.c
Normal file
643
kernel-open/nvidia/nvlink_linux.c
Normal file
@@ -0,0 +1,643 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "conftest.h"
|
||||
|
||||
#include "nvlink_os.h"
|
||||
#include "nvlink_linux.h"
|
||||
#include "nvlink_errors.h"
|
||||
#include "nvlink_export.h"
|
||||
#include "nv-linux.h"
|
||||
#include "nv-procfs.h"
|
||||
#include "nv-time.h"
|
||||
#include "nvlink_caps.h"
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/major.h>
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
#define MAX_ERROR_STRING 512
|
||||
|
||||
typedef struct nvlink_file_private
|
||||
{
|
||||
struct
|
||||
{
|
||||
/* A duped file descriptor for fabric_mgmt capability */
|
||||
int fabric_mgmt;
|
||||
} capability_fds;
|
||||
} nvlink_file_private_t;
|
||||
|
||||
#define NVLINK_SET_FILE_PRIVATE(filp, data) ((filp)->private_data = (data))
|
||||
#define NVLINK_GET_FILE_PRIVATE(filp) ((nvlink_file_private_t *)(filp)->private_data)
|
||||
|
||||
typedef struct
|
||||
{
|
||||
struct mutex lock;
|
||||
NvBool initialized;
|
||||
struct cdev cdev;
|
||||
dev_t devno;
|
||||
int opened;
|
||||
int major_devnum;
|
||||
} _nvlink_drvctx;
|
||||
|
||||
|
||||
// nvlink driver local state
|
||||
static _nvlink_drvctx nvlink_drvctx;
|
||||
|
||||
#if defined(CONFIG_PROC_FS)
|
||||
#define NV_DEFINE_SINGLE_NVLINK_PROCFS_FILE(name) \
|
||||
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, nv_system_pm_lock)
|
||||
#endif
|
||||
|
||||
#define NVLINK_PROCFS_DIR "driver/nvidia-nvlink"
|
||||
|
||||
static struct proc_dir_entry *nvlink_procfs_dir = NULL;
|
||||
|
||||
#if defined(CONFIG_PROC_FS)
|
||||
static int nvlink_is_procfs_available = 1;
|
||||
#else
|
||||
static int nvlink_is_procfs_available = 0;
|
||||
#endif
|
||||
|
||||
static struct proc_dir_entry *nvlink_permissions = NULL;
|
||||
|
||||
static int nv_procfs_read_permissions(struct seq_file *s, void *v)
|
||||
{
|
||||
// Restrict device node permissions - 0666.
|
||||
seq_printf(s, "%s: %u\n", "DeviceFileMode", 438);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
NV_DEFINE_SINGLE_NVLINK_PROCFS_FILE(permissions);
|
||||
|
||||
static void nvlink_permissions_exit(void)
|
||||
{
|
||||
if (!nvlink_permissions)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
NV_REMOVE_PROC_ENTRY(nvlink_permissions);
|
||||
nvlink_permissions = NULL;
|
||||
}
|
||||
|
||||
static int nvlink_permissions_init(void)
|
||||
{
|
||||
if (!nvlink_procfs_dir)
|
||||
{
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
nvlink_permissions = NV_CREATE_PROC_FILE("permissions",
|
||||
nvlink_procfs_dir,
|
||||
permissions,
|
||||
NULL);
|
||||
if (!nvlink_permissions)
|
||||
{
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void nvlink_procfs_exit(void)
|
||||
{
|
||||
nvlink_permissions_exit();
|
||||
|
||||
if (!nvlink_procfs_dir)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
NV_REMOVE_PROC_ENTRY(nvlink_procfs_dir);
|
||||
nvlink_procfs_dir = NULL;
|
||||
}
|
||||
|
||||
static int nvlink_procfs_init(void)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (!nvlink_is_procfs_available)
|
||||
{
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
nvlink_procfs_dir = NV_CREATE_PROC_DIR(NVLINK_PROCFS_DIR, NULL);
|
||||
if (!nvlink_procfs_dir)
|
||||
{
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
rc = nvlink_permissions_init();
|
||||
if (rc < 0)
|
||||
{
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
|
||||
nvlink_procfs_exit();
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int nvlink_fops_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
int rc = 0;
|
||||
nvlink_file_private_t *private = NULL;
|
||||
|
||||
nvlink_print(NVLINK_DBG_INFO, "nvlink driver open\n");
|
||||
|
||||
mutex_lock(&nvlink_drvctx.lock);
|
||||
|
||||
// nvlink lib driver is currently exclusive open.
|
||||
if (nvlink_drvctx.opened)
|
||||
{
|
||||
rc = -EBUSY;
|
||||
goto open_error;
|
||||
}
|
||||
|
||||
private = (nvlink_file_private_t *)nvlink_malloc(sizeof(*private));
|
||||
if (private == NULL)
|
||||
{
|
||||
rc = -ENOMEM;
|
||||
goto open_error;
|
||||
}
|
||||
|
||||
private->capability_fds.fabric_mgmt = -1;
|
||||
NVLINK_SET_FILE_PRIVATE(filp, private);
|
||||
|
||||
// mark our state as opened
|
||||
nvlink_drvctx.opened = NV_TRUE;
|
||||
|
||||
open_error:
|
||||
mutex_unlock(&nvlink_drvctx.lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int nvlink_fops_release(struct inode *inode, struct file *filp)
|
||||
{
|
||||
nvlink_file_private_t *private = NVLINK_GET_FILE_PRIVATE(filp);
|
||||
|
||||
nvlink_print(NVLINK_DBG_INFO, "nvlink driver close\n");
|
||||
|
||||
WARN_ON(private == NULL);
|
||||
|
||||
mutex_lock(&nvlink_drvctx.lock);
|
||||
|
||||
if (private->capability_fds.fabric_mgmt > 0)
|
||||
{
|
||||
nvlink_cap_release(private->capability_fds.fabric_mgmt);
|
||||
private->capability_fds.fabric_mgmt = -1;
|
||||
}
|
||||
|
||||
nvlink_free(filp->private_data);
|
||||
NVLINK_SET_FILE_PRIVATE(filp, NULL);
|
||||
|
||||
// mark the device as not opened
|
||||
nvlink_drvctx.opened = NV_FALSE;
|
||||
|
||||
mutex_unlock(&nvlink_drvctx.lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nvlink_fops_ioctl(struct inode *inode,
|
||||
struct file *filp,
|
||||
unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
nvlink_ioctrl_params ctrl_params = {0};
|
||||
int param_size = _IOC_SIZE(cmd);
|
||||
void *param_buf = NULL;
|
||||
NvlStatus ret_val = 0;
|
||||
int rc = 0;
|
||||
|
||||
// no buffer for simple _IO types
|
||||
if (param_size)
|
||||
{
|
||||
// allocate a buffer to hold user input
|
||||
param_buf = kzalloc(param_size, GFP_KERNEL);
|
||||
if (NULL == param_buf)
|
||||
{
|
||||
rc = -ENOMEM;
|
||||
goto nvlink_ioctl_fail;
|
||||
}
|
||||
|
||||
// copy user input to kernel buffers. Simple _IOR() ioctls can skip this step.
|
||||
if (_IOC_DIR(cmd) & _IOC_WRITE)
|
||||
{
|
||||
// copy user input to local buffer
|
||||
if (copy_from_user(param_buf, (const void *)arg, param_size))
|
||||
{
|
||||
rc = -EFAULT;
|
||||
goto nvlink_ioctl_fail;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ctrl_params.osPrivate = filp->private_data;
|
||||
ctrl_params.cmd = _IOC_NR(cmd);
|
||||
ctrl_params.buf = param_buf;
|
||||
ctrl_params.size = param_size;
|
||||
|
||||
ret_val = nvlink_lib_ioctl_ctrl(&ctrl_params);
|
||||
if (NVL_SUCCESS != ret_val)
|
||||
{
|
||||
rc = -EINVAL;
|
||||
goto nvlink_ioctl_fail;
|
||||
}
|
||||
|
||||
// no copy for write-only ioctl
|
||||
if ((param_size) && (_IOC_DIR(cmd) & _IOC_READ))
|
||||
{
|
||||
if (copy_to_user((void *)arg, ctrl_params.buf, ctrl_params.size))
|
||||
{
|
||||
rc = -EFAULT;
|
||||
goto nvlink_ioctl_fail;
|
||||
}
|
||||
}
|
||||
|
||||
nvlink_ioctl_fail:
|
||||
if (param_buf)
|
||||
{
|
||||
kfree(param_buf);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
#define NV_FILE_INODE(file) (file)->f_inode
|
||||
|
||||
static long nvlink_fops_unlocked_ioctl(struct file *file,
|
||||
unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
return nvlink_fops_ioctl(NV_FILE_INODE(file), file, cmd, arg);
|
||||
}
|
||||
|
||||
|
||||
static const struct file_operations nvlink_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = nvlink_fops_open,
|
||||
.release = nvlink_fops_release,
|
||||
#if defined(NV_FILE_OPERATIONS_HAS_IOCTL)
|
||||
.ioctl = nvlink_fops_ioctl,
|
||||
#endif
|
||||
.unlocked_ioctl = nvlink_fops_unlocked_ioctl,
|
||||
};
|
||||
|
||||
/*
 * Register the nvlink core driver: initialize the core library, allocate a
 * character device region, add the cdev, and create the procfs/capability
 * entries. On any failure, previously acquired resources are released in
 * reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno value.
 */
int __init nvlink_core_init(void)
{
    NvlStatus status;
    int rc;

    if (nvlink_drvctx.initialized == NV_TRUE)
    {
        nvlink_print(NVLINK_DBG_ERRORS, "nvlink core interface already initialized\n");
        return -EBUSY;
    }

    mutex_init(&nvlink_drvctx.lock);

    status = nvlink_lib_initialize();
    if (status != NVL_SUCCESS)
    {
        nvlink_print(NVLINK_DBG_ERRORS, "Failed to initialize driver : %d\n", status);
        rc = -ENODEV;
        goto fail_lib_initialize;
    }

    rc = alloc_chrdev_region(&nvlink_drvctx.devno, 0, NVLINK_NUM_MINOR_DEVICES,
                             NVLINK_DEVICE_NAME);
    if (rc < 0)
    {
        nvlink_print(NVLINK_DBG_ERRORS, "alloc_chrdev_region failed: %d\n", rc);
        goto fail_chrdev_region;
    }

    nvlink_drvctx.major_devnum = MAJOR(nvlink_drvctx.devno);
    nvlink_print(NVLINK_DBG_INFO, "Nvlink Core is being initialized, major device number %d\n",
                 nvlink_drvctx.major_devnum);

    cdev_init(&nvlink_drvctx.cdev, &nvlink_fops);
    nvlink_drvctx.cdev.owner = THIS_MODULE;
    rc = cdev_add(&nvlink_drvctx.cdev, nvlink_drvctx.devno, NVLINK_NUM_MINOR_DEVICES);
    if (rc < 0)
    {
        nvlink_print(NVLINK_DBG_ERRORS, " Unable to create cdev\n");
        goto fail_cdev_add;
    }

    rc = nvlink_procfs_init();
    if (rc < 0)
    {
        goto fail_procfs_init;
    }

    rc = nvlink_cap_init(NVLINK_PROCFS_DIR);
    if (rc < 0)
    {
        nvlink_print(NVLINK_DBG_ERRORS, " Unable to create capability\n");
        goto fail_cap_init;
    }

    nvlink_drvctx.initialized = NV_TRUE;

    return 0;

    // Unwind in reverse order of acquisition.
fail_cap_init:
    nvlink_procfs_exit();

fail_procfs_init:
    cdev_del(&nvlink_drvctx.cdev);

fail_cdev_add:
    unregister_chrdev_region(nvlink_drvctx.devno, NVLINK_NUM_MINOR_DEVICES);

fail_chrdev_region:
    nvlink_lib_unload();

fail_lib_initialize:
    nv_mutex_destroy(&nvlink_drvctx.lock);
    return rc;
}
|
||||
|
||||
void nvlink_core_exit(void)
|
||||
{
|
||||
if (NV_FALSE == nvlink_drvctx.initialized)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
nvlink_cap_exit();
|
||||
|
||||
nvlink_procfs_exit();
|
||||
|
||||
cdev_del(&nvlink_drvctx.cdev);
|
||||
|
||||
unregister_chrdev_region(nvlink_drvctx.devno, NVLINK_NUM_MINOR_DEVICES);
|
||||
|
||||
nvlink_lib_unload();
|
||||
|
||||
nv_mutex_destroy(&nvlink_drvctx.lock);
|
||||
|
||||
nvlink_print(NVLINK_DBG_INFO, "Unregistered the Nvlink Core, major device number %d\n",
|
||||
nvlink_drvctx.major_devnum);
|
||||
}
|
||||
|
||||
void
|
||||
nvlink_print
|
||||
(
|
||||
const char *file,
|
||||
int line,
|
||||
const char *function,
|
||||
int log_level,
|
||||
const char *fmt,
|
||||
...
|
||||
)
|
||||
{
|
||||
va_list arglist;
|
||||
char nv_string[MAX_ERROR_STRING];
|
||||
char *sys_log_level;
|
||||
|
||||
switch (log_level) {
|
||||
case NVLINK_DBG_LEVEL_INFO:
|
||||
sys_log_level = KERN_INFO;
|
||||
break;
|
||||
case NVLINK_DBG_LEVEL_SETUP:
|
||||
sys_log_level = KERN_DEBUG;
|
||||
break;
|
||||
case NVLINK_DBG_LEVEL_USERERRORS:
|
||||
sys_log_level = KERN_NOTICE;
|
||||
break;
|
||||
case NVLINK_DBG_LEVEL_WARNINGS:
|
||||
sys_log_level = KERN_WARNING;
|
||||
break;
|
||||
case NVLINK_DBG_LEVEL_ERRORS:
|
||||
sys_log_level = KERN_ERR;
|
||||
break;
|
||||
default:
|
||||
sys_log_level = KERN_INFO;
|
||||
break;
|
||||
}
|
||||
|
||||
va_start(arglist, fmt);
|
||||
vsnprintf(nv_string, sizeof(nv_string), fmt, arglist);
|
||||
va_end(arglist);
|
||||
|
||||
nv_string[sizeof(nv_string) - 1] = '\0';
|
||||
printk("%snvidia-nvlink: %s", sys_log_level, nv_string);
|
||||
}
|
||||
|
||||
/*
 * Allocate 'size' bytes from the kernel heap. May sleep (GFP_KERNEL);
 * returns NULL on failure. The caller owns the memory and must release
 * it with nvlink_free().
 */
void * nvlink_malloc(NvLength size)
{
    return kmalloc(size, GFP_KERNEL);
}

/*
 * Release memory obtained from nvlink_malloc(). NULL is a no-op.
 */
void nvlink_free(void *ptr)
{
    //
    // Bug fix: the previous `return kfree(ptr);` returned a void expression
    // from a void function, which is a constraint violation in ISO C
    // (accepted only as a compiler extension).
    //
    kfree(ptr);
}
|
||||
|
||||
/*
 * Copy the NUL-terminated string 'src' into 'dest' and return 'dest'.
 * The caller must guarantee 'dest' is large enough (no bounds checking).
 */
char * nvlink_strcpy(char *dest, const char *src)
{
    strcpy(dest, src);
    return dest;
}
|
||||
|
||||
/*
 * Lexicographically compare two NUL-terminated strings; returns the usual
 * strcmp() contract (<0, 0, >0).
 */
int nvlink_strcmp(const char *dest, const char *src)
{
    int result = strcmp(dest, src);

    return result;
}
|
||||
|
||||
/*
 * Return the length of the NUL-terminated string 's', excluding the
 * terminator.
 */
NvLength nvlink_strlen(const char *s)
{
    return strlen(s);
}

/*
 * Bounded printf-style formatting into 'dest' (at most 'size' bytes,
 * including the terminator). Returns the number of characters that would
 * have been written, per the vsnprintf() contract.
 */
int nvlink_snprintf(char *dest, NvLength size, const char *fmt, ...)
{
    va_list args;
    int     written;

    va_start(args, fmt);
    written = vsnprintf(dest, size, fmt, args);
    va_end(args);

    return written;
}
|
||||
|
||||
NvU32 nvlink_memRd32(const volatile void * address)
|
||||
{
|
||||
return (*(const volatile NvU32*)(address));
|
||||
}
|
||||
|
||||
void nvlink_memWr32(volatile void *address, NvU32 data)
|
||||
{
|
||||
(*(volatile NvU32 *)(address)) = data;
|
||||
}
|
||||
|
||||
NvU64 nvlink_memRd64(const volatile void * address)
|
||||
{
|
||||
return (*(const volatile NvU64 *)(address));
|
||||
}
|
||||
|
||||
void nvlink_memWr64(volatile void *address, NvU64 data)
|
||||
{
|
||||
(*(volatile NvU64 *)(address)) = data;
|
||||
}
|
||||
|
||||
/*
 * Fill 'size' bytes at 'dest' with 'value'; returns 'dest'.
 */
void * nvlink_memset(void *dest, int value, NvLength size)
{
    return memset(dest, value, size);
}

/*
 * Copy 'size' bytes from 'src' to 'dest'; regions must not overlap.
 * Returns 'dest'.
 */
void * nvlink_memcpy(void *dest, const void *src, NvLength size)
{
    return memcpy(dest, src, size);
}

/*
 * Compare 'size' bytes of 's1' and 's2'; returns the usual memcmp()
 * contract (<0, 0, >0).
 */
int nvlink_memcmp(const void *s1, const void *s2, NvLength size)
{
    return memcmp(s1, s2, size);
}
|
||||
|
||||
/*
|
||||
* Sleep for specified milliseconds. Yields the CPU to scheduler.
|
||||
*/
|
||||
void nvlink_sleep(unsigned int ms)
|
||||
{
|
||||
NV_STATUS status;
|
||||
|
||||
status = nv_sleep_ms(ms);
|
||||
|
||||
if (status != NV_OK)
|
||||
{
|
||||
if (printk_ratelimit())
|
||||
{
|
||||
nvlink_print(NVLINK_DBG_ERRORS, "NVLink: requested sleep duration"
|
||||
" %d msec exceeded %d msec\n",
|
||||
ms, NV_MAX_ISR_DELAY_MS);
|
||||
WARN_ON(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void nvlink_assert(int cond)
|
||||
{
|
||||
if ((cond) == 0x0)
|
||||
{
|
||||
if (printk_ratelimit())
|
||||
{
|
||||
nvlink_print(NVLINK_DBG_ERRORS, "NVLink: Assertion failed!\n");
|
||||
WARN_ON(1);
|
||||
}
|
||||
|
||||
dbg_breakpoint();
|
||||
}
|
||||
}
|
||||
|
||||
void * nvlink_allocLock()
|
||||
{
|
||||
struct semaphore *sema;
|
||||
|
||||
sema = nvlink_malloc(sizeof(*sema));
|
||||
if (sema == NULL)
|
||||
{
|
||||
nvlink_print(NVLINK_DBG_ERRORS, "Failed to allocate sema!\n");
|
||||
return NULL;
|
||||
}
|
||||
sema_init(sema, 1);
|
||||
|
||||
return sema;
|
||||
}
|
||||
|
||||
void nvlink_acquireLock(void *hLock)
|
||||
{
|
||||
down(hLock);
|
||||
}
|
||||
|
||||
void nvlink_releaseLock(void *hLock)
|
||||
{
|
||||
up(hLock);
|
||||
}
|
||||
|
||||
void nvlink_freeLock(void *hLock)
|
||||
{
|
||||
if (NULL == hLock)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
NVLINK_FREE(hLock);
|
||||
}
|
||||
|
||||
NvBool nvlink_isLockOwner(void *hLock)
|
||||
{
|
||||
return NV_TRUE;
|
||||
}
|
||||
|
||||
/*
 * Validate and take ownership of a fabric management capability fd for
 * this file ('osPrivate' is the per-file private data). On success the
 * duplicated fd is stored and released at file close.
 *
 * Returns NVL_SUCCESS, NVL_BAD_ARGS, or NVL_ERR_OPERATING_SYSTEM.
 */
NvlStatus nvlink_acquire_fabric_mgmt_cap(void *osPrivate, NvU64 capDescriptor)
{
    nvlink_file_private_t *private_data = osPrivate;
    int dup_fd;

    if (private_data == NULL)
    {
        return NVL_BAD_ARGS;
    }

    dup_fd = nvlink_cap_acquire((int)capDescriptor,
                                NVLINK_CAP_FABRIC_MANAGEMENT);
    if (dup_fd < 0)
    {
        return NVL_ERR_OPERATING_SYSTEM;
    }

    private_data->capability_fds.fabric_mgmt = dup_fd;
    return NVL_SUCCESS;
}
|
||||
|
||||
int nvlink_is_fabric_manager(void *osPrivate)
|
||||
{
|
||||
nvlink_file_private_t *private_data = (nvlink_file_private_t *)osPrivate;
|
||||
|
||||
/* Make sure that fabric mgmt capbaility fd is valid */
|
||||
if ((private_data == NULL) ||
|
||||
(private_data->capability_fds.fabric_mgmt < 0))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int nvlink_is_admin(void)
|
||||
{
|
||||
return NV_IS_SUSER();
|
||||
}
|
||||
64
kernel-open/nvidia/nvlink_linux.h
Normal file
64
kernel-open/nvidia/nvlink_linux.h
Normal file
@@ -0,0 +1,64 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NVLINK_LINUX_H_
|
||||
#define _NVLINK_LINUX_H_
|
||||
|
||||
#include <linux/init.h> // for entry/exit macros
|
||||
#include <linux/sched.h> // for "struct task_struct"
|
||||
#include <linux/kernel.h> // for printk priority macros
|
||||
#include <linux/fs.h>
|
||||
|
||||
|
||||
#define NVLINK_DEVICE_NAME "nvidia-nvlink"
|
||||
#define NVLINK_NUM_MINOR_DEVICES 1
|
||||
|
||||
/*
 * @Brief : Debug Breakpoint implementation
 *
 * @Description : Emits an architecture-specific breakpoint/trap
 *     instruction, but only on DEBUG builds and only when a kernel
 *     debugger (KGDB/XMON remote debug, or KDB) is configured.
 *     On all other configurations this compiles to an empty function.
 *
 * @returns void
 */
static inline void
dbg_breakpoint(void)
{
    /* OS specific breakpoint implemented for NVLink library */
#if defined(DEBUG)
#if defined(CONFIG_X86_REMOTE_DEBUG) || defined(CONFIG_KGDB) || defined(CONFIG_XMON)
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
    /* Software breakpoint interrupt on x86 */
    __asm__ __volatile__ ("int $3");
#elif defined(NVCPU_ARM)
    /* Compiled-in KGDB breakpoint instruction word */
    __asm__ __volatile__ (".word %c0" :: "i" (KGDB_COMPILED_BREAK));
#elif defined(NVCPU_AARCH64)
# warning "Need to implement dbg_breakpoint() for aarch64"
#elif defined(NVCPU_PPC64LE)
    /* Unconditional trap on PPC64LE */
    __asm__ __volatile__ ("trap");
#endif /* NVCPU_X86 || NVCPU_X86_64 */
#elif defined(CONFIG_KDB)
    KDB_ENTER();
#endif /* CONFIG_X86_REMOTE_DEBUG || CONFIG_KGDB || CONFIG_XMON */
#endif /* DEBUG */
}
|
||||
|
||||
#endif //_NVLINK_LINUX_H_
|
||||
86
kernel-open/nvidia/nvlink_os.h
Normal file
86
kernel-open/nvidia/nvlink_os.h
Normal file
@@ -0,0 +1,86 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NVLINK_OS_H_
|
||||
#define _NVLINK_OS_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "nvlink_common.h"
|
||||
|
||||
#define NVLINK_FREE(x) nvlink_free((void *)x)
|
||||
|
||||
// Memory management functions
|
||||
void * nvlink_malloc(NvLength);
|
||||
void nvlink_free(void *);
|
||||
void * nvlink_memset(void *, int, NvLength);
|
||||
void * nvlink_memcpy(void *, const void *, NvLength);
|
||||
int nvlink_memcmp(const void *, const void *, NvLength);
|
||||
NvU32 nvlink_memRd32(const volatile void *);
|
||||
void nvlink_memWr32(volatile void *, NvU32);
|
||||
NvU64 nvlink_memRd64(const volatile void *);
|
||||
void nvlink_memWr64(volatile void *, NvU64);
|
||||
|
||||
// String management functions
|
||||
char * nvlink_strcpy(char *, const char *);
|
||||
NvLength nvlink_strlen(const char *);
|
||||
int nvlink_strcmp(const char *, const char *);
|
||||
int nvlink_snprintf(char *, NvLength, const char *, ...);
|
||||
|
||||
// Locking support functions
|
||||
void * nvlink_allocLock(void);
|
||||
void nvlink_acquireLock(void *);
|
||||
NvBool nvlink_isLockOwner(void *);
|
||||
void nvlink_releaseLock(void *);
|
||||
void nvlink_freeLock(void *);
|
||||
|
||||
// Miscellaneous functions
|
||||
void nvlink_assert(int expression);
|
||||
void nvlink_sleep(unsigned int ms);
|
||||
void nvlink_print(const char *, int, const char *, int, const char *, ...);
|
||||
int nvlink_is_admin(void);
|
||||
|
||||
// Capability functions
|
||||
NvlStatus nvlink_acquire_fabric_mgmt_cap(void *osPrivate, NvU64 capDescriptor);
|
||||
int nvlink_is_fabric_manager(void *osPrivate);
|
||||
|
||||
#define NVLINK_DBG_LEVEL_INFO 0x0
|
||||
#define NVLINK_DBG_LEVEL_SETUP 0x1
|
||||
#define NVLINK_DBG_LEVEL_USERERRORS 0x2
|
||||
#define NVLINK_DBG_LEVEL_WARNINGS 0x3
|
||||
#define NVLINK_DBG_LEVEL_ERRORS 0x4
|
||||
|
||||
#define NVLINK_DBG_WHERE __FILE__, __LINE__, __FUNCTION__
|
||||
#define NVLINK_DBG_INFO NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_INFO
|
||||
#define NVLINK_DBG_SETUP NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_SETUP
|
||||
#define NVLINK_DBG_USERERRORS NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_USERERRORS
|
||||
#define NVLINK_DBG_WARNINGS NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_WARNINGS
|
||||
#define NVLINK_DBG_ERRORS NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_ERRORS
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif //_NVLINK_OS_H_
|
||||
61
kernel-open/nvidia/nvlink_pci.h
Normal file
61
kernel-open/nvidia/nvlink_pci.h
Normal file
@@ -0,0 +1,61 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NVLINK_PCI_H_
|
||||
#define _NVLINK_PCI_H_
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include "nvlink_common.h"
|
||||
|
||||
#define NV_PCI_RESOURCE_START(dev, bar) pci_resource_start(dev, (bar))
|
||||
#define NV_PCI_RESOURCE_SIZE(dev, bar) pci_resource_len(dev, (bar))
|
||||
#define NV_PCI_RESOURCE_FLAGS(dev, bar) pci_resource_flags(dev, (bar))
|
||||
|
||||
#if defined(NVCPU_X86)
|
||||
#define NV_PCI_RESOURCE_VALID(dev, bar) \
|
||||
((NV_PCI_RESOURCE_START(dev, bar) != 0) && \
|
||||
(NV_PCI_RESOURCE_SIZE(dev, bar) != 0) && \
|
||||
(!((NV_PCI_RESOURCE_FLAGS(dev, bar) & PCI_BASE_ADDRESS_MEM_TYPE_64) && \
|
||||
((NV_PCI_RESOURCE_START(dev, bar) >> PAGE_SHIFT) > 0xfffffULL))))
|
||||
#else
|
||||
#define NV_PCI_RESOURCE_VALID(dev, bar) \
|
||||
((NV_PCI_RESOURCE_START(dev, bar) != 0) && \
|
||||
(NV_PCI_RESOURCE_SIZE(dev, bar) != 0))
|
||||
#endif
|
||||
|
||||
#define NV_PCI_DOMAIN_NUMBER(dev) (NvU32)pci_domain_nr(dev->bus)
|
||||
#define NV_PCI_BUS_NUMBER(dev) (dev)->bus->number
|
||||
#define NV_PCI_DEVFN(dev) (dev)->devfn
|
||||
#define NV_PCI_SLOT_NUMBER(dev) PCI_SLOT(NV_PCI_DEVFN(dev))
|
||||
|
||||
#define NV_PCI_DEV_FMT NVLINK_PCI_DEV_FMT
|
||||
#define NV_PCI_DEV_FMT_ARGS(dev) \
|
||||
NV_PCI_DOMAIN_NUMBER(dev), NV_PCI_BUS_NUMBER(dev), \
|
||||
NV_PCI_SLOT_NUMBER(dev), PCI_FUNC((dev)->devfn)
|
||||
|
||||
#define NVRM_PCICFG_NUM_BARS 6
|
||||
#define NVRM_PCICFG_BAR_OFFSET(i) (0x10 + (i) * 4)
|
||||
|
||||
#define NV_PCIE_CFG_MAX_OFFSET 0x1000
|
||||
|
||||
#endif // _NVLINK_PCI_H_
|
||||
53
kernel-open/nvidia/nvlink_proto.h
Normal file
53
kernel-open/nvidia/nvlink_proto.h
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _NVLINK_PROTO_H_
|
||||
#define _NVLINK_PROTO_H_
|
||||
|
||||
#include "nvlink_common.h"
|
||||
|
||||
/*
|
||||
* Functions defined in nvlink_linux.c
|
||||
*/
|
||||
|
||||
int nvlink_core_init (void);
|
||||
void nvlink_core_exit (void);
|
||||
|
||||
|
||||
/*
|
||||
* Functions defined in nvswitch_linux.c
|
||||
*/
|
||||
int nvswitch_init (void);
|
||||
void nvswitch_exit (void);
|
||||
|
||||
|
||||
#if defined(NVCPU_AARCH64)
|
||||
/*
|
||||
* Functions defined in tegrashim_linux.c (Tegra only)
|
||||
*/
|
||||
int tegrashim_init (void);
|
||||
void tegrashim_exit (void);
|
||||
NvlStatus tegrashim_init_device (struct pci_dev *);
|
||||
#endif
|
||||
|
||||
#endif /* _NVLINK_PROTO_H_ */
|
||||
2136
kernel-open/nvidia/os-interface.c
Normal file
2136
kernel-open/nvidia/os-interface.c
Normal file
File diff suppressed because it is too large
Load Diff
287
kernel-open/nvidia/os-mlock.c
Normal file
287
kernel-open/nvidia/os-mlock.c
Normal file
@@ -0,0 +1,287 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
|
||||
/*
 * Resolve the PFN backing @address within @vma.
 *
 * On kernels where the conftest detected unsafe_follow_pfn()
 * (NV_UNSAFE_FOLLOW_PFN_PRESENT), call that; otherwise fall back to
 * follow_pfn(). Callers treat a negative return as failure.
 */
static inline int nv_follow_pfn(struct vm_area_struct *vma,
                                unsigned long address,
                                unsigned long *pfn)
{
#if defined(NV_UNSAFE_FOLLOW_PFN_PRESENT)
    return unsafe_follow_pfn(vma, address, pfn);
#else
    return follow_pfn(vma, address, pfn);
#endif
}
|
||||
|
||||
/*!
|
||||
* @brief Locates the PFNs for a user IO address range, and converts those to
|
||||
* their associated PTEs.
|
||||
*
|
||||
* @param[in] vma VMA that contains the virtual address range given by the
|
||||
* start and page count parameters.
|
||||
* @param[in] start Beginning of the virtual address range of the IO PTEs.
|
||||
* @param[in] page_count Number of pages containing the IO range being
|
||||
* mapped.
|
||||
* @param[in,out] pte_array Storage array for PTE addresses. Must be large
|
||||
* enough to contain at least page_count pointers.
|
||||
*
|
||||
* @return NV_OK if the PTEs were identified successfully, error otherwise.
|
||||
*/
|
||||
static NV_STATUS get_io_ptes(struct vm_area_struct *vma,
|
||||
NvUPtr start,
|
||||
NvU64 page_count,
|
||||
NvU64 **pte_array)
|
||||
{
|
||||
NvU64 i;
|
||||
unsigned long pfn;
|
||||
|
||||
for (i = 0; i < page_count; i++)
|
||||
{
|
||||
if (nv_follow_pfn(vma, (start + (i * PAGE_SIZE)), &pfn) < 0)
|
||||
{
|
||||
return NV_ERR_INVALID_ADDRESS;
|
||||
}
|
||||
|
||||
pte_array[i] = (NvU64 *)(pfn << PAGE_SHIFT);
|
||||
|
||||
if (i == 0)
|
||||
continue;
|
||||
|
||||
//
|
||||
// This interface is to be used for contiguous, uncacheable I/O regions.
|
||||
// Internally, osCreateOsDescriptorFromIoMemory() checks the user-provided
|
||||
// flags against this, and creates a single memory descriptor with the same
|
||||
// attributes. This check ensures the actual mapping supplied matches the
|
||||
// user's declaration. Ensure the PFNs represent a contiguous range,
|
||||
// error if they do not.
|
||||
//
|
||||
if ((NvU64)pte_array[i] != (((NvU64)pte_array[i-1]) + PAGE_SIZE))
|
||||
{
|
||||
return NV_ERR_INVALID_ADDRESS;
|
||||
}
|
||||
}
|
||||
return NV_OK;
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief Pins user IO pages that have been mapped to the user processes virtual
|
||||
* address space with remap_pfn_range.
|
||||
*
|
||||
* @param[in] vma VMA that contains the virtual address range given by the
|
||||
* start and the page count.
|
||||
* @param[in] start Beginning of the virtual address range of the IO pages.
|
||||
* @param[in] page_count Number of pages to pin from start.
|
||||
* @param[in,out] page_array Storage array for pointers to the pinned pages.
|
||||
* Must be large enough to contain at least page_count
|
||||
* pointers.
|
||||
*
|
||||
* @return NV_OK if the pages were pinned successfully, error otherwise.
|
||||
*/
|
||||
static NV_STATUS get_io_pages(struct vm_area_struct *vma,
|
||||
NvUPtr start,
|
||||
NvU64 page_count,
|
||||
struct page **page_array)
|
||||
{
|
||||
NV_STATUS rmStatus = NV_OK;
|
||||
NvU64 i, pinned = 0;
|
||||
unsigned long pfn;
|
||||
|
||||
for (i = 0; i < page_count; i++)
|
||||
{
|
||||
if ((nv_follow_pfn(vma, (start + (i * PAGE_SIZE)), &pfn) < 0) ||
|
||||
(!pfn_valid(pfn)))
|
||||
{
|
||||
rmStatus = NV_ERR_INVALID_ADDRESS;
|
||||
break;
|
||||
}
|
||||
|
||||
// Page-backed memory mapped to userspace with remap_pfn_range
|
||||
page_array[i] = pfn_to_page(pfn);
|
||||
get_page(page_array[i]);
|
||||
pinned++;
|
||||
}
|
||||
|
||||
if (pinned < page_count)
|
||||
{
|
||||
for (i = 0; i < pinned; i++)
|
||||
put_page(page_array[i]);
|
||||
rmStatus = NV_ERR_INVALID_ADDRESS;
|
||||
}
|
||||
|
||||
return rmStatus;
|
||||
}
|
||||
|
||||
/*!
 * @brief Resolve the physical backing of a user I/O mapping.
 *
 * The range must lie entirely inside a single VM_IO/VM_PFNMAP VMA of the
 * current process. Depending on the first PFN, the range is returned either
 * as pinned struct pages (*page_array, via get_io_pages) or as raw PTE
 * physical addresses (*pte_array, via get_io_ptes). Exactly one of the two
 * output arrays is populated on success; the caller owns the returned
 * allocation (freed here only on failure).
 *
 * @param[in]  address     User virtual base address of the range.
 * @param[in]  page_count  Number of pages in the range.
 * @param[out] pte_array   Receives the result array when the range is not
 *                         page-backed (pfn_valid() false for first PFN).
 * @param[out] page_array  Receives the result array when the range is
 *                         page-backed.
 *
 * @return NV_OK on success, NV_ERR_NOT_SUPPORTED in atomic context,
 *         NV_ERR_INVALID_ADDRESS for an unsuitable mapping, or the
 *         allocation failure status.
 */
NV_STATUS NV_API_CALL os_lookup_user_io_memory(
    void *address,
    NvU64 page_count,
    NvU64 **pte_array,
    void **page_array
)
{
    NV_STATUS rmStatus;
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    unsigned long pfn;
    NvUPtr start = (NvUPtr)address;
    void **result_array;

    /* This path allocates and takes the mmap lock: it may sleep. */
    if (!NV_MAY_SLEEP())
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: %s(): invalid context!\n", __FUNCTION__);
        return NV_ERR_NOT_SUPPORTED;
    }

    rmStatus = os_alloc_mem((void **)&result_array, (page_count * sizeof(NvP64)));
    if (rmStatus != NV_OK)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: failed to allocate page table!\n");
        return rmStatus;
    }

    nv_mmap_read_lock(mm);

    // find the first VMA which intersects the interval start_addr..end_addr-1,
    vma = find_vma_intersection(mm, start, start+1);

    // Verify that the given address range is contained in a single vma
    if ((vma == NULL) || ((vma->vm_flags & (VM_IO | VM_PFNMAP)) == 0) ||
        !((vma->vm_start <= start) &&
          ((vma->vm_end - start) >> PAGE_SHIFT >= page_count)))
    {
        nv_printf(NV_DBG_ERRORS,
            "Cannot map memory with base addr 0x%llx and size of 0x%llx pages\n",
            start ,page_count);
        rmStatus = NV_ERR_INVALID_ADDRESS;
        goto done;
    }

    if (nv_follow_pfn(vma, start, &pfn) < 0)
    {
        rmStatus = NV_ERR_INVALID_ADDRESS;
        goto done;
    }

    /*
     * The first PFN decides the flavor of the whole range: page-backed
     * (pfn_valid) ranges are pinned as struct pages, otherwise the PFNs
     * are converted to PTE physical addresses.
     */
    if (pfn_valid(pfn))
    {
        rmStatus = get_io_pages(vma, start, page_count, (struct page **)result_array);
        if (rmStatus == NV_OK)
            *page_array = (void *)result_array;
    }
    else
    {
        rmStatus = get_io_ptes(vma, start, page_count, (NvU64 **)result_array);
        if (rmStatus == NV_OK)
            *pte_array = (NvU64 *)result_array;
    }

done:
    nv_mmap_read_unlock(mm);

    /* On failure the result array is not handed to the caller: free it. */
    if (rmStatus != NV_OK)
    {
        os_free_mem(result_array);
    }

    return rmStatus;
}
|
||||
|
||||
/*!
 * @brief Pin user pages in memory for DMA-style access.
 *
 * Allocates a page-pointer array, pins page_count pages starting at
 * address with NV_GET_USER_PAGES, and hands the array to the caller via
 * *page_array. The caller releases the pin and the array with
 * os_unlock_user_pages(). All-or-nothing: a partial pin is undone and
 * reported as failure.
 *
 * @param[in]  address     User virtual base address.
 * @param[in]  page_count  Number of pages to pin.
 * @param[out] page_array  Receives the pinned struct page pointer array.
 * @param[in]  flags       NV_LOCK_USER_PAGES flags; the _WRITE field selects
 *                         a writable pin.
 *
 * @return NV_OK on success, NV_ERR_NOT_SUPPORTED in atomic context,
 *         NV_ERR_INVALID_ADDRESS if pinning failed, or the allocation
 *         failure status.
 */
NV_STATUS NV_API_CALL os_lock_user_pages(
    void *address,
    NvU64 page_count,
    void **page_array,
    NvU32 flags
)
{
    NV_STATUS rmStatus;
    struct mm_struct *mm = current->mm;
    struct page **user_pages;
    NvU64 i, pinned;
    NvBool write = DRF_VAL(_LOCK_USER_PAGES, _FLAGS, _WRITE, flags), force = 0;
    int ret;

    /* Pinning allocates and takes the mmap lock: it may sleep. */
    if (!NV_MAY_SLEEP())
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: %s(): invalid context!\n", __FUNCTION__);
        return NV_ERR_NOT_SUPPORTED;
    }

    rmStatus = os_alloc_mem((void **)&user_pages,
            (page_count * sizeof(*user_pages)));
    if (rmStatus != NV_OK)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: failed to allocate page table!\n");
        return rmStatus;
    }

    nv_mmap_read_lock(mm);
    ret = NV_GET_USER_PAGES((unsigned long)address,
                            page_count, write, force, user_pages, NULL);
    nv_mmap_read_unlock(mm);
    pinned = ret;

    if (ret < 0)
    {
        os_free_mem(user_pages);
        return NV_ERR_INVALID_ADDRESS;
    }
    else if (pinned < page_count)
    {
        /* Partial pin: release what was pinned and fail. */
        for (i = 0; i < pinned; i++)
            put_page(user_pages[i]);
        os_free_mem(user_pages);
        return NV_ERR_INVALID_ADDRESS;
    }

    *page_array = user_pages;

    return NV_OK;
}
|
||||
|
||||
/*!
 * @brief Release pages previously pinned by os_lock_user_pages().
 *
 * Marks each page dirty (the pages may have been written through the
 * pin) and drops the pin reference, then frees the page-pointer array
 * itself.
 *
 * @param[in] page_count  Number of entries in page_array.
 * @param[in] page_array  struct page pointer array returned by
 *                        os_lock_user_pages(); freed here.
 *
 * @return NV_OK always.
 */
NV_STATUS NV_API_CALL os_unlock_user_pages(
    NvU64 page_count,
    void *page_array
)
{
    NvBool write = 1;
    struct page **user_pages = page_array;
    //
    // The index must be as wide as page_count: with the previous NvU32
    // counter, a page_count >= 2^32 made the comparison promote the
    // wrapped counter and the loop never terminated.
    //
    NvU64 i;

    for (i = 0; i < page_count; i++)
    {
        if (write)
            set_page_dirty_lock(user_pages[i]);
        put_page(user_pages[i]);
    }

    os_free_mem(user_pages);

    return NV_OK;
}
|
||||
206
kernel-open/nvidia/os-pci.c
Normal file
206
kernel-open/nvidia/os-pci.c
Normal file
@@ -0,0 +1,206 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
|
||||
/*
 * Look up a PCI device by domain/bus/slot/function and return it as an
 * opaque handle for the os_pci_* config-space accessors.
 *
 * Optionally reports the device's vendor and device IDs through the
 * *vendor / *device out-parameters (each may be NULL). Returns NULL in
 * atomic context or when no matching device exists.
 *
 * NOTE(review): the reference taken by the lookup is dropped before the
 * pointer is returned (see the existing TODO), so the handle is not
 * protected against hot-unplug.
 */
void* NV_API_CALL os_pci_init_handle(
    NvU32 domain,
    NvU8 bus,
    NvU8 slot,
    NvU8 function,
    NvU16 *vendor,
    NvU16 *device
)
{
    struct pci_dev *dev;
    unsigned int devfn = PCI_DEVFN(slot, function);

    /* The PCI lookup may sleep; refuse atomic context. */
    if (!NV_MAY_SLEEP())
        return NULL;

    dev = NV_GET_DOMAIN_BUS_AND_SLOT(domain, bus, devfn);
    if (dev != NULL)
    {
        if (vendor) *vendor = dev->vendor;
        if (device) *device = dev->device;
        pci_dev_put(dev); /* TODO: Fix me! (hotplug) */
    }
    return (void *) dev;
}
|
||||
|
||||
/*
 * Read one byte of PCI config space from the device behind handle.
 * Offsets at or beyond NV_PCIE_CFG_MAX_OFFSET are rejected and report
 * all-ones in *pReturnValue.
 */
NV_STATUS NV_API_CALL os_pci_read_byte(
    void *handle,
    NvU32 offset,
    NvU8 *pReturnValue
)
{
    if (offset < NV_PCIE_CFG_MAX_OFFSET)
    {
        pci_read_config_byte((struct pci_dev *)handle, offset, pReturnValue);
        return NV_OK;
    }

    *pReturnValue = 0xff;
    return NV_ERR_NOT_SUPPORTED;
}
|
||||
|
||||
/*
 * Read one 16-bit word of PCI config space from the device behind
 * handle. Offsets at or beyond NV_PCIE_CFG_MAX_OFFSET are rejected and
 * report all-ones in *pReturnValue.
 */
NV_STATUS NV_API_CALL os_pci_read_word(
    void *handle,
    NvU32 offset,
    NvU16 *pReturnValue
)
{
    if (offset < NV_PCIE_CFG_MAX_OFFSET)
    {
        pci_read_config_word((struct pci_dev *)handle, offset, pReturnValue);
        return NV_OK;
    }

    *pReturnValue = 0xffff;
    return NV_ERR_NOT_SUPPORTED;
}
|
||||
|
||||
/*
 * Read one 32-bit dword of PCI config space from the device behind
 * handle. Offsets at or beyond NV_PCIE_CFG_MAX_OFFSET are rejected and
 * report all-ones in *pReturnValue.
 */
NV_STATUS NV_API_CALL os_pci_read_dword(
    void *handle,
    NvU32 offset,
    NvU32 *pReturnValue
)
{
    if (offset < NV_PCIE_CFG_MAX_OFFSET)
    {
        pci_read_config_dword((struct pci_dev *)handle, offset, pReturnValue);
        return NV_OK;
    }

    *pReturnValue = 0xffffffff;
    return NV_ERR_NOT_SUPPORTED;
}
|
||||
|
||||
/*
 * Write one byte of PCI config space on the device behind handle.
 * Offsets at or beyond NV_PCIE_CFG_MAX_OFFSET are rejected.
 */
NV_STATUS NV_API_CALL os_pci_write_byte(
    void *handle,
    NvU32 offset,
    NvU8 value
)
{
    if (offset < NV_PCIE_CFG_MAX_OFFSET)
    {
        pci_write_config_byte((struct pci_dev *)handle, offset, value);
        return NV_OK;
    }

    return NV_ERR_NOT_SUPPORTED;
}
|
||||
|
||||
/*
 * Write one 16-bit word of PCI config space on the device behind
 * handle. Offsets at or beyond NV_PCIE_CFG_MAX_OFFSET are rejected.
 */
NV_STATUS NV_API_CALL os_pci_write_word(
    void *handle,
    NvU32 offset,
    NvU16 value
)
{
    if (offset < NV_PCIE_CFG_MAX_OFFSET)
    {
        pci_write_config_word((struct pci_dev *)handle, offset, value);
        return NV_OK;
    }

    return NV_ERR_NOT_SUPPORTED;
}
|
||||
|
||||
/*
 * Write one 32-bit dword of PCI config space on the device behind
 * handle. Offsets at or beyond NV_PCIE_CFG_MAX_OFFSET are rejected.
 */
NV_STATUS NV_API_CALL os_pci_write_dword(
    void *handle,
    NvU32 offset,
    NvU32 value
)
{
    if (offset < NV_PCIE_CFG_MAX_OFFSET)
    {
        pci_write_config_dword((struct pci_dev *)handle, offset, value);
        return NV_OK;
    }

    return NV_ERR_NOT_SUPPORTED;
}
|
||||
|
||||
/*
 * Report whether PCI device removal is supported on this kernel,
 * i.e. whether conftest found a stop-and-remove primitive
 * (NV_PCI_STOP_AND_REMOVE_BUS_DEVICE) for os_pci_remove() to use.
 */
NvBool NV_API_CALL os_pci_remove_supported(void)
{
#if defined NV_PCI_STOP_AND_REMOVE_BUS_DEVICE
    return NV_TRUE;
#else
    return NV_FALSE;
#endif
}
|
||||
|
||||
/*
 * Remove the PCI device behind handle from the bus via the kernel's
 * stop-and-remove primitive. Callers are expected to gate on
 * os_pci_remove_supported(); on kernels without the primitive this is
 * a no-op (with a diagnostic and breakpoint on DEBUG builds).
 */
void NV_API_CALL os_pci_remove(
    void *handle
)
{
#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE)
    NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(handle);
#elif defined(DEBUG)
    nv_printf(NV_DBG_ERRORS,
        "NVRM: %s() is called even though NV_PCI_STOP_AND_REMOVE_BUS_DEVICE is not defined\n",
        __FUNCTION__);
    os_dbg_breakpoint();
#endif
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
336
kernel-open/nvidia/os-registry.c
Normal file
336
kernel-open/nvidia/os-registry.c
Normal file
@@ -0,0 +1,336 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2000-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
#define NV_DEFINE_REGISTRY_KEY_TABLE
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
#include "nv-reg.h"
|
||||
#include "nv-gpu-info.h"
|
||||
|
||||
/*!
 * @brief This function parses the PCI BDF identifier string and returns the
 * Domain, Bus, Device and function components from the PCI BDF string.
 *
 * This parser is highly adaptable and hence allows PCI BDF string in following
 * 3 formats.
 *
 * 1) bus:slot              : Domain and function defaults to 0.
 * 2) domain:bus:slot       : Function defaults to 0.
 * 3) domain:bus:slot.func  : Complete PCI dev id string.
 *
 * @param[in]  pci_dev_str  String containing the BDF to be parsed.
 * @param[out] pci_domain   Pointer where pci_domain is to be returned.
 * @param[out] pci_bus      Pointer where pci_bus is to be returned.
 * @param[out] pci_slot     Pointer where pci_slot is to be returned.
 * @param[out] pci_func     Pointer where pci_func is to be returned.
 *
 * @return NV_OK if succeeds, or NV_STATUS error code otherwise.
 */
static NV_STATUS pci_str_to_bdf(char *pci_dev_str, NvU32 *pci_domain,
    NvU32 *pci_bus, NvU32 *pci_slot, NvU32 *pci_func)
{
    char *option_string = NULL;
    char *token, *string;
    NvU32 domain, bus, slot;
    NV_STATUS status = NV_OK;

    //
    // remove_spaces() allocates memory, hence we need to keep a pointer
    // to the original string for freeing at end of function.
    //
    if ((option_string = rm_remove_spaces(pci_dev_str)) == NULL)
    {
        // memory allocation failed, returning
        return NV_ERR_GENERIC;
    }

    string = option_string;

    if (!strlen(string) || !pci_domain || !pci_bus || !pci_slot || !pci_func)
    {
        status = NV_ERR_INVALID_ARGUMENT;
        goto done;
    }

    // Split at '.': token is "domain:bus[:slot]", string is the function.
    if ((token = strsep(&string, ".")) != NULL)
    {
        // PCI device can have maximum 8 functions only.
        if ((string != NULL) && (!(*string >= '0' && *string <= '7') ||
            (strlen(string) > 1)))
        {
            nv_printf(NV_DBG_ERRORS,
                      "NVRM: Invalid PCI function in token %s\n",
                      pci_dev_str);
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }
        else if (string == NULL)
        {
            // No '.' present: function defaults to 0.
            *pci_func = 0;
        }
        else
        {
            *pci_func = (NvU32)(*string - '0');
        }

        // First hex field; may turn out to be the domain or the bus.
        domain = simple_strtoul(token, &string, 16);

        if ((string == NULL) || (*string != ':') || (*(string + 1) == '\0'))
        {
            nv_printf(NV_DBG_ERRORS,
                      "NVRM: Invalid PCI domain/bus in token %s\n",
                      pci_dev_str);
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }

        // Second hex field; may be the bus or the slot.
        token = string;
        bus = simple_strtoul((token + 1), &string, 16);

        if (string == NULL)
        {
            nv_printf(NV_DBG_ERRORS,
                      "NVRM: Invalid PCI bus/slot in token %s\n",
                      pci_dev_str);
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }

        if (*string != '\0')
        {
            // Three fields present: domain:bus:slot.
            if ((*string != ':') || (*(string + 1) == '\0'))
            {
                nv_printf(NV_DBG_ERRORS,
                          "NVRM: Invalid PCI slot in token %s\n",
                          pci_dev_str);
                status = NV_ERR_INVALID_ARGUMENT;
                goto done;
            }

            token = string;
            slot = (NvU32)simple_strtoul(token + 1, &string, 16);
            // A result of 0 with no characters consumed means no number.
            if ((slot == 0) && ((token + 1) == string))
            {
                nv_printf(NV_DBG_ERRORS,
                          "NVRM: Invalid PCI slot in token %s\n",
                          pci_dev_str);
                status = NV_ERR_INVALID_ARGUMENT;
                goto done;
            }
            *pci_domain = domain;
            *pci_bus = bus;
            *pci_slot = slot;
        }
        else
        {
            // Only two fields: they were bus:slot; domain defaults to 0.
            *pci_slot = bus;
            *pci_bus = domain;
            *pci_domain = 0;
        }
        status = NV_OK;
    }
    else
    {
        status = NV_ERR_INVALID_ARGUMENT;
    }

done:
    // Freeing the memory allocated by remove_spaces().
    os_free_mem(option_string);
    return status;
}
|
||||
|
||||
/*!
|
||||
* @brief This function parses the registry keys per GPU device. It accepts a
|
||||
* semicolon separated list of key=value pairs. The first key value pair MUST be
|
||||
* "pci=DDDD:BB:DD.F;" where DDDD is Domain, BB is Bus Id, DD is device slot
|
||||
* number and F is the Function. This PCI BDF is used to identify which GPU to
|
||||
* assign the registry keys that follows next.
|
||||
* If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT
|
||||
* found, then all the registry keys that follows are skipped, until we find next
|
||||
* valid pci identified "pci=DDDD:BB:DD.F;". Following are the valid formats for
|
||||
* the value of the "pci" string:
|
||||
* 1) bus:slot : Domain and function defaults to 0.
|
||||
* 2) domain:bus:slot : Function defaults to 0.
|
||||
* 3) domain:bus:slot.func : Complete PCI dev id string.
|
||||
*
|
||||
*
|
||||
* @param[in] sp pointer to nvidia_stack_t struct.
|
||||
*
|
||||
* @return NV_OK if succeeds, or NV_STATUS error code otherwise.
|
||||
*/
|
||||
NV_STATUS nv_parse_per_device_option_string(nvidia_stack_t *sp)
{
    NV_STATUS status = NV_OK;
    char *option_string;
    char *cursor;
    char *entry;
    char *key, *val;
    NvU32 data, domain, bus, slot, func;
    nv_linux_state_t *nvl;
    nv_state_t *nv = NULL;

    // Nothing to do when no per-device registry keys were supplied.
    if (NVreg_RegistryDwordsPerDevice == NULL)
    {
        return NV_OK;
    }

    option_string = rm_remove_spaces(NVreg_RegistryDwordsPerDevice);
    if (option_string == NULL)
    {
        return NV_ERR_GENERIC;
    }

    cursor = option_string;

    // Walk the semicolon-separated list of key=value pairs.
    while ((entry = strsep(&cursor, ";")) != NULL)
    {
        key = strsep(&entry, "=");
        if ((key == NULL) || (strlen(key) == 0))
        {
            continue;
        }

        val = strsep(&entry, "=");
        if ((val == NULL) || (strlen(val) == 0))
        {
            continue;
        }

        // Reject malformed entries containing more than one '='.
        if (strsep(&entry, "=") != NULL)
        {
            continue;
        }

        //
        // A "pci" key carries a PCI BDF string (not a dword); it selects
        // the GPU that the following key=value pairs apply to.
        //
        if (strcmp(key, NV_REG_PCI_DEVICE_BDF) == 0)
        {
            status = pci_str_to_bdf(val, &domain, &bus, &slot, &func);

            //
            // Reset the cached GPU on a bad BDF or when no matching GPU
            // exists; subsequent keys are then skipped until the next
            // valid "pci" entry.
            //
            nv = NULL;
            if (status == NV_OK)
            {
                nvl = find_pci(domain, bus, slot, func);
                if (nvl != NULL)
                {
                    nv = NV_STATE_PTR(nvl);
                }
            }
            continue;
        }

        // Without a valid cached GPU, ignore this key=value pair.
        if (nv == NULL)
        {
            continue;
        }

        data = (NvU32)simple_strtoul(val, NULL, 0);

        rm_write_registry_dword(sp, nv, key, data);
    }

    // Release the buffer allocated by rm_remove_spaces().
    os_free_mem(option_string);
    return status;
}
|
||||
|
||||
/*
|
||||
* Compare given string UUID with the GpuBlacklist or ExcludedGpus registry
|
||||
* parameter string and return whether the UUID is in the GPU exclusion list
|
||||
*/
|
||||
NvBool nv_is_uuid_in_gpu_exclusion_list(const char *uuid)
|
||||
{
|
||||
const char *input;
|
||||
char *list;
|
||||
char *ptr;
|
||||
char *token;
|
||||
|
||||
//
|
||||
// When both NVreg_GpuBlacklist and NVreg_ExcludedGpus are defined
|
||||
// NVreg_ExcludedGpus takes precedence.
|
||||
//
|
||||
if (NVreg_ExcludedGpus != NULL)
|
||||
input = NVreg_ExcludedGpus;
|
||||
else if (NVreg_GpuBlacklist != NULL)
|
||||
input = NVreg_GpuBlacklist;
|
||||
else
|
||||
return NV_FALSE;
|
||||
|
||||
if ((list = rm_remove_spaces(input)) == NULL)
|
||||
return NV_FALSE;
|
||||
|
||||
ptr = list;
|
||||
|
||||
while ((token = strsep(&ptr, ",")) != NULL)
|
||||
{
|
||||
if (strcmp(token, uuid) == 0)
|
||||
{
|
||||
os_free_mem(list);
|
||||
return NV_TRUE;
|
||||
}
|
||||
}
|
||||
os_free_mem(list);
|
||||
return NV_FALSE;
|
||||
}
|
||||
|
||||
/*
 * Push the module-parameter registry state into the RM: the RmMsg
 * string, the global RegistryDwords option string, and each entry of
 * the nv_parms table.
 */
NV_STATUS NV_API_CALL os_registry_init(void)
{
    nvidia_stack_t *sp = NULL;
    nv_parm_t *parm;

    if (nv_kmem_cache_alloc_stack(&sp) != 0)
    {
        return NV_ERR_NO_MEMORY;
    }

    if (NVreg_RmMsg != NULL)
    {
        rm_write_registry_string(sp, NULL,
                "RmMsg", NVreg_RmMsg, strlen(NVreg_RmMsg));
    }

    rm_parse_option_string(sp, NVreg_RegistryDwords);

    // The nv_parms table is terminated by an entry with a NULL name.
    for (parm = nv_parms; parm->name != NULL; parm++)
    {
        rm_write_registry_dword(sp, NULL, parm->name, *parm->data);
    }

    nv_kmem_cache_free_stack(sp);

    return NV_OK;
}
|
||||
78
kernel-open/nvidia/os-usermap.c
Normal file
78
kernel-open/nvidia/os-usermap.c
Normal file
@@ -0,0 +1,78 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 1999-2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
|
||||
#include "os-interface.h"
|
||||
#include "nv-linux.h"
|
||||
|
||||
/*
 * Produce a pointer for accessing a user-space range.
 *
 * On this platform the user virtual address is returned unchanged
 * (cast through NvUPtr); no kernel mapping is created, and priv_data,
 * size_bytes, mode and protect are unused here.
 */
void* NV_API_CALL os_map_user_space(
    NvU64   start,
    NvU64   size_bytes,
    NvU32   mode,
    NvU32   protect,
    void  **priv_data
)
{
    return (void *)(NvUPtr)start;
}
|
||||
|
||||
/*
 * Counterpart to os_map_user_space(). Because no kernel mapping is
 * created there (the user address is returned as-is), there is nothing
 * to release — intentionally a no-op.
 */
void NV_API_CALL os_unmap_user_space(
    void  *address,
    NvU64  size,
    void  *priv_data
)
{
}
|
||||
|
||||
/*
 * Map a physical-address mmap offset back to its page index within an
 * allocation.
 *
 * @param[in]  pAllocPrivate  nv_alloc_t describing the allocation.
 * @param[in]  offset         Physical address to look up.
 * @param[out] pPageIndex     Receives the matching page index.
 *
 * @return NV_OK if the offset corresponds to a page of the allocation,
 *         NV_ERR_OBJECT_NOT_FOUND otherwise.
 */
NV_STATUS NV_API_CALL os_match_mmap_offset(
    void  *pAllocPrivate,
    NvU64  offset,
    NvU64 *pPageIndex
)
{
    nv_alloc_t *at = pAllocPrivate;
    NvU64 i;

    // An empty allocation cannot match any offset (also guards the
    // page_table[0] access below).
    if (at->num_pages == 0)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    if (at->flags.contig)
    {
        //
        // Contiguous allocation: pages are laid out linearly from the
        // base physical address, so compute the index directly instead
        // of scanning every page (O(1) instead of O(n)).
        //
        NvU64 base = at->page_table[0]->phys_addr;

        if ((offset >= base) && (((offset - base) % PAGE_SIZE) == 0))
        {
            NvU64 index = (offset - base) / PAGE_SIZE;

            if (index < at->num_pages)
            {
                *pPageIndex = index;
                return NV_OK;
            }
        }
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    // Discontiguous allocation: each page must be checked individually.
    for (i = 0; i < at->num_pages; i++)
    {
        if (offset == at->page_table[i]->phys_addr)
        {
            *pPageIndex = i;
            return NV_OK;
        }
    }

    return NV_ERR_OBJECT_NOT_FOUND;
}
|
||||
205
kernel-open/nvidia/procfs_nvswitch.c
Normal file
205
kernel-open/nvidia/procfs_nvswitch.c
Normal file
@@ -0,0 +1,205 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "linux_nvswitch.h"
|
||||
#include "nv-procfs.h"
|
||||
|
||||
#include <linux/fs.h>
|
||||
|
||||
#if defined(CONFIG_PROC_FS)
|
||||
|
||||
#define NV_DEFINE_SINGLE_NVSWITCH_PROCFS_FILE(name) \
|
||||
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, nv_system_pm_lock)
|
||||
|
||||
#define NVSWITCH_PROCFS_DIR "driver/nvidia-nvswitch"
|
||||
|
||||
static struct proc_dir_entry *nvswitch_procfs_dir;
|
||||
static struct proc_dir_entry *nvswitch_permissions;
|
||||
static struct proc_dir_entry *nvswitch_procfs_devices;
|
||||
|
||||
/*
 * seq_file show callback for /proc/driver/nvidia-nvswitch/permissions.
 * nvidia-modprobe reads this to learn the mode for device nodes.
 */
static int
nv_procfs_read_permissions
(
    struct seq_file *s,
    void *v
)
{
    // Restrict device node permissions to world read/write (0666 == 438
    // decimal; octal makes the intended mode self-describing).
    seq_printf(s, "%s: %u\n", "DeviceFileMode", 0666);

    return 0;
}
|
||||
|
||||
NV_DEFINE_SINGLE_NVSWITCH_PROCFS_FILE(permissions);
|
||||
|
||||
static int
|
||||
nv_procfs_read_device_info
|
||||
(
|
||||
struct seq_file *s,
|
||||
void *v
|
||||
)
|
||||
{
|
||||
NVSWITCH_DEV *nvswitch_dev = s->private;
|
||||
|
||||
if (!nvswitch_dev)
|
||||
{
|
||||
NVSWITCH_OS_ASSERT(0);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
seq_printf(s, "BIOS Version: ");
|
||||
|
||||
if (nvswitch_dev->bios_ver)
|
||||
{
|
||||
seq_printf(s, "%02llx.%02llx.%02llx.%02llx.%02llx\n",
|
||||
nvswitch_dev->bios_ver >> 32,
|
||||
(nvswitch_dev->bios_ver >> 24) & 0xFF,
|
||||
(nvswitch_dev->bios_ver >> 16) & 0xFF,
|
||||
(nvswitch_dev->bios_ver >> 8) & 0xFF,
|
||||
nvswitch_dev->bios_ver & 0xFF);
|
||||
}
|
||||
else
|
||||
{
|
||||
seq_printf(s, "N/A\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
NV_DEFINE_SINGLE_NVSWITCH_PROCFS_FILE(device_info);
|
||||
|
||||
void
|
||||
nvswitch_procfs_device_remove
|
||||
(
|
||||
NVSWITCH_DEV *nvswitch_dev
|
||||
)
|
||||
{
|
||||
if (!nvswitch_dev || !nvswitch_dev->procfs_dir)
|
||||
{
|
||||
NVSWITCH_OS_ASSERT(0);
|
||||
return;
|
||||
}
|
||||
|
||||
nv_procfs_unregister_all(nvswitch_dev->procfs_dir, nvswitch_dev->procfs_dir);
|
||||
nvswitch_dev->procfs_dir = NULL;
|
||||
}
|
||||
|
||||
int
|
||||
nvswitch_procfs_device_add
|
||||
(
|
||||
NVSWITCH_DEV *nvswitch_dev
|
||||
)
|
||||
{
|
||||
struct pci_dev *pci_dev;
|
||||
struct proc_dir_entry *device_dir, *entry;
|
||||
char name[32];
|
||||
|
||||
if (!nvswitch_dev || !nvswitch_dev->pci_dev)
|
||||
{
|
||||
NVSWITCH_OS_ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
|
||||
pci_dev = nvswitch_dev->pci_dev;
|
||||
|
||||
snprintf(name, sizeof(name), "%04x:%02x:%02x.%1x",
|
||||
NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
|
||||
NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
|
||||
|
||||
device_dir = NV_CREATE_PROC_DIR(name, nvswitch_procfs_devices);
|
||||
if (!device_dir)
|
||||
return -1;
|
||||
|
||||
nvswitch_dev->procfs_dir = device_dir;
|
||||
|
||||
entry = NV_CREATE_PROC_FILE("information", device_dir, device_info,
|
||||
nvswitch_dev);
|
||||
if (!entry)
|
||||
goto failed;
|
||||
|
||||
return 0;
|
||||
|
||||
failed:
|
||||
nvswitch_procfs_device_remove(nvswitch_dev);
|
||||
return -1;
|
||||
}
|
||||
|
||||
void
|
||||
nvswitch_procfs_exit
|
||||
(
|
||||
void
|
||||
)
|
||||
{
|
||||
if (!nvswitch_procfs_dir)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
nv_procfs_unregister_all(nvswitch_procfs_dir, nvswitch_procfs_dir);
|
||||
nvswitch_procfs_dir = NULL;
|
||||
}
|
||||
|
||||
int
|
||||
nvswitch_procfs_init
|
||||
(
|
||||
void
|
||||
)
|
||||
{
|
||||
nvswitch_procfs_dir = NV_CREATE_PROC_DIR(NVSWITCH_PROCFS_DIR, NULL);
|
||||
if (!nvswitch_procfs_dir)
|
||||
{
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
nvswitch_permissions = NV_CREATE_PROC_FILE("permissions",
|
||||
nvswitch_procfs_dir,
|
||||
permissions,
|
||||
NULL);
|
||||
if (!nvswitch_permissions)
|
||||
{
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
nvswitch_procfs_devices = NV_CREATE_PROC_DIR("devices", nvswitch_procfs_dir);
|
||||
if (!nvswitch_procfs_devices)
|
||||
{
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
|
||||
nvswitch_procfs_exit();
|
||||
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
#else // !CONFIG_PROC_FS
|
||||
|
||||
// Stub implementations used when the kernel lacks CONFIG_PROC_FS:
// procfs setup/teardown becomes a no-op that always reports success.
int nvswitch_procfs_init(void) { return 0; }
void nvswitch_procfs_exit(void) { }
int nvswitch_procfs_device_add(NVSWITCH_DEV *nvswitch_dev) { return 0; }
void nvswitch_procfs_device_remove(NVSWITCH_DEV *nvswitch_dev) { }
|
||||
|
||||
#endif // CONFIG_PROC_FS
|
||||
31
kernel-open/nvidia/rmp2pdefines.h
Normal file
31
kernel-open/nvidia/rmp2pdefines.h
Normal file
@@ -0,0 +1,31 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _RMP2PDEFINES_H_
|
||||
#define _RMP2PDEFINES_H_
|
||||
|
||||
// Peer-to-peer page size constants, expressed in bytes.
#define NVRM_P2P_PAGESIZE_SMALL_4K   (4 << 10)    // 4 KiB
#define NVRM_P2P_PAGESIZE_BIG_64K    (64 << 10)   // 64 KiB
#define NVRM_P2P_PAGESIZE_BIG_128K   (128 << 10)  // 128 KiB
|
||||
|
||||
#endif
|
||||
Reference in New Issue
Block a user