515.43.04

This commit is contained in:
Andy Ritger
2022-05-09 13:18:59 -07:00
commit 1739a20efc
2519 changed files with 1060036 additions and 0 deletions

View File

@@ -0,0 +1,335 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-kthread-q.h"
#include "nv-list-helpers.h"
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>
#if defined(NV_LINUX_BUG_H_PRESENT)
#include <linux/bug.h>
#else
#include <asm/bug.h>
#endif
// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:
//
// 1. Each nv_kthread_q instance is a first-in, first-out queue.
//
// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
//
// You can create any number of queues, each of which gets its own
// named kernel thread (kthread). You can then insert arbitrary functions
// into the queue, and those functions will be run in the context of the
// queue's kthread.
#ifndef WARN
// Only *really* old kernels (2.6.9) end up here. Just use a simple printk
// to implement this, because such kernels won't be supported much longer.
//
// Mirrors the kernel's WARN() contract: evaluates to the (boolean-ized)
// condition, and prints the message at KERN_ERR level when it is true.
#define WARN(condition, format...) ({                                    \
    int __ret_warn_on = !!(condition);                                   \
    if (unlikely(__ret_warn_on))                                         \
        printk(KERN_ERR format);                                         \
    unlikely(__ret_warn_on);                                             \
})
#endif
// Unconditional warning with an "nv_kthread_q" prefix. Includes the
// current task name for context, except in interrupt context where
// "current" is not meaningful for attribution.
#define NVQ_WARN(fmt, ...)                                               \
    do {                                                                 \
        if (in_interrupt()) {                                            \
            WARN(1, "nv_kthread_q: [in interrupt]: " fmt,                \
                 ##__VA_ARGS__);                                         \
        }                                                                \
        else {                                                           \
            WARN(1, "nv_kthread_q: task: %s: " fmt,                      \
                 current->comm,                                          \
                 ##__VA_ARGS__);                                         \
        }                                                                \
    } while (0)
// Worker kthread body: wait for work on q_sem, pop one item off the
// queue, run it, repeat. Exits after main_loop_should_exit is set (see
// nv_kthread_q_stop), then parks until kthread_stop() is invoked.
static int _main_loop(void *args)
{
    nv_kthread_q_t *q = (nv_kthread_q_t *)args;
    nv_kthread_q_item_t *q_item = NULL;
    unsigned long flags;

    while (1) {
        // Normally this thread is never interrupted. However,
        // down_interruptible (instead of down) is called here,
        // in order to avoid being classified as a potentially
        // hung task, by the kernel watchdog.
        while (down_interruptible(&q->q_sem))
            NVQ_WARN("Interrupted during semaphore wait\n");

        if (atomic_read(&q->main_loop_should_exit))
            break;

        spin_lock_irqsave(&q->q_lock, flags);

        // The q_sem semaphore prevents us from getting here unless there is
        // at least one item in the list, so an empty list indicates a bug.
        if (unlikely(list_empty(&q->q_list_head))) {
            spin_unlock_irqrestore(&q->q_lock, flags);
            NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q);
            continue;
        }

        // Consume one item from the queue
        q_item = list_first_entry(&q->q_list_head,
                                  nv_kthread_q_item_t,
                                  q_list_node);

        // list_del_init (not list_del) so the item's node reads as empty
        // again, which is how _raw_q_schedule detects "not pending".
        list_del_init(&q_item->q_list_node);

        spin_unlock_irqrestore(&q->q_lock, flags);

        // Run the item. Deliberately outside the lock: the callback may
        // sleep, or may re-schedule itself onto this same queue.
        q_item->function_to_run(q_item->function_args);

        // Make debugging a little simpler by clearing this between runs:
        q_item = NULL;
    }

    // Yield until nv_kthread_q_stop() calls kthread_stop() on us.
    while (!kthread_should_stop())
        schedule();

    return 0;
}
// Flush the queue, then stop and release its kthread.
//
// Safe to call on a queue that was never successfully initialized
// (q_kthread == NULL), and safe to call more than once: a repeat call
// returns early because q_kthread has been cleared.
void nv_kthread_q_stop(nv_kthread_q_t *q)
{
    // check if queue has been properly initialized
    if (unlikely(!q->q_kthread))
        return;

    // Let all pending items run to completion before shutting down.
    nv_kthread_q_flush(q);

    // If this assertion fires, then a caller likely either broke the API rules,
    // by adding items after calling nv_kthread_q_stop, or possibly messed up
    // with inadequate flushing of self-rescheduling q_items.
    if (unlikely(!list_empty(&q->q_list_head)))
        NVQ_WARN("list not empty after flushing\n");

    if (likely(!atomic_read(&q->main_loop_should_exit))) {
        atomic_set(&q->main_loop_should_exit, 1);

        // Wake up the kthread so that it can see that it needs to stop:
        up(&q->q_sem);

        kthread_stop(q->q_kthread);
        q->q_kthread = NULL;
    }
}
// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by
// kthread_create_on_node relies on a 2 entry, per-core cache to minimize
// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the
// stack location ends up being a function of the core assigned to the current
// thread, instead of being a function of the specified NUMA node. The cache was
// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0
// ("fork: Optimize task creation by caching two thread stacks per CPU if
// CONFIG_VMAP_STACK=y")
//
// To work around the problematic cache, we create up to three kernel threads
// -If the first thread's stack is resident on the preferred node, return this
// thread.
// -Otherwise, create a second thread. If its stack is resident on the
// preferred node, stop the first thread and return this one.
// -Otherwise, create a third thread. The stack allocator does not find a
// cached stack, and so falls back to vmalloc, which takes the NUMA hint into
// consideration. The first two threads are then stopped.
//
// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned.
//
// This function is never invoked when there is no NUMA preference (preferred
// node is NUMA_NO_NODE).
#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1
// Create a kthread whose stack resides on the given NUMA node, working
// around the NUMA-unaware per-core stack cache described above.
//
// Makes up to three creation attempts; surplus threads are stopped before
// returning. May return an ERR_PTR value if even the first creation
// attempt fails -- callers must check with IS_ERR(), exactly as they
// would for kthread_create_on_node() itself.
static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
                                                 nv_kthread_q_t *q,
                                                 int preferred_node,
                                                 const char *q_name)
{
    unsigned i, j;
    // Fixed qualifier order: "static const" is the conventional C
    // spelling ("const static" is legal but non-idiomatic).
    static const unsigned attempts = 3;
    struct task_struct *thread[3];

    for (i = 0;; i++) {
        struct page *stack;

        thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name);

        if (unlikely(IS_ERR(thread[i]))) {
            // Instead of failing, pick the previous thread, even if its
            // stack is not allocated on the preferred node.
            if (i > 0)
                i--;
            break;
        }

        // vmalloc is not used to allocate the stack, so simply return the
        // thread, even if its stack may not be allocated on the preferred node
        if (!is_vmalloc_addr(thread[i]->stack))
            break;

        // Ran out of attempts - return thread even if its stack may not be
        // allocated on the preferred node
        if (i == attempts - 1)
            break;

        // Get the NUMA node where the first page of the stack is resident. If
        // it is the preferred node, select this thread.
        stack = vmalloc_to_page(thread[i]->stack);
        if (page_to_nid(stack) == preferred_node)
            break;
    }

    // Stop every earlier (rejected) thread; keep only thread[i].
    for (j = i; j > 0; j--)
        kthread_stop(thread[j - 1]);

    return thread[i];
}
#endif
// Initialize a queue and create its worker kthread, optionally placing the
// thread's stack on a preferred NUMA node.
//
// Returns 0 on success, or a negative error code on failure. On failure,
// q->q_kthread is NULL, so nv_kthread_q_stop() remains safe to call.
// Passing a node other than NV_KTHREAD_NO_NODE returns -ENOTSUPP when the
// kernel lacks the required affinity support.
int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node)
{
    memset(q, 0, sizeof(*q));

    INIT_LIST_HEAD(&q->q_list_head);
    spin_lock_init(&q->q_lock);
    sema_init(&q->q_sem, 0);

    if (preferred_node == NV_KTHREAD_NO_NODE) {
        q->q_kthread = kthread_create(_main_loop, q, q_name);
    }
    else {
#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1
        q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name);
#else
        return -ENOTSUPP;
#endif
    }

    if (IS_ERR(q->q_kthread)) {
        int err = PTR_ERR(q->q_kthread);

        // Clear q_kthread before returning so that nv_kthread_q_stop() can be
        // safely called on it making error handling easier.
        q->q_kthread = NULL;
        return err;
    }

    // The kthread is created in a non-running state; start it now.
    wake_up_process(q->q_kthread);

    return 0;
}
// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
// Append q_item to the queue, unless it is already pending on one.
// Returns 1 if the item was enqueued (and the worker was woken), 0 if the
// item was already pending (its list node was non-empty).
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
{
    unsigned long irq_flags;
    int was_scheduled = 0;

    spin_lock_irqsave(&q->q_lock, irq_flags);

    if (likely(list_empty(&q_item->q_list_node))) {
        list_add_tail(&q_item->q_list_node, &q->q_list_head);
        was_scheduled = 1;
    }

    spin_unlock_irqrestore(&q->q_lock, irq_flags);

    // Wake the worker outside the lock; one up() per enqueued item keeps
    // the semaphore count equal to the number of pending items.
    if (likely(was_scheduled))
        up(&q->q_sem);

    return was_scheduled;
}
// Prepare a queue item: record the callback and its argument, and reset
// the list node so the item reads as "not pending" to _raw_q_schedule.
void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
                            nv_q_func_t function_to_run,
                            void *function_args)
{
    q_item->function_to_run = function_to_run;
    q_item->function_args   = function_args;
    INIT_LIST_HEAD(&q_item->q_list_node);
}
// Returns true (non-zero) if the q_item got scheduled, false otherwise.
// Public scheduling entry point. Returns non-zero if the q_item got
// scheduled, zero if it was already pending or the queue is being
// stopped.
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
                                 nv_kthread_q_item_t *q_item)
{
    // Common case first: the queue is alive, so just enqueue.
    if (likely(!atomic_read(&q->main_loop_should_exit)))
        return _raw_q_schedule(q, q_item);

    NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was "
             "called with a non-alive q: 0x%p\n", q);
    return 0;
}
// Sentinel q_item callback used by _raw_q_flush: signals the completion
// that the flusher is blocked on.
static void _q_flush_function(void *args)
{
    complete((struct completion *)args);
}
// Block until every item currently in the queue has run, by enqueueing a
// sentinel item and waiting for it to execute.
static void _raw_q_flush(nv_kthread_q_t *q)
{
    DECLARE_COMPLETION(flush_done);
    nv_kthread_q_item_t flush_item;

    nv_kthread_q_item_init(&flush_item, _q_flush_function, &flush_done);
    _raw_q_schedule(q, &flush_item);

    // The queue is FIFO and serviced by one thread, so once the sentinel
    // has run, everything queued ahead of it has run too.
    wait_for_completion(&flush_done);
}
// Public flush entry point: drain the queue, tolerating q_items that
// reschedule themselves once. Refused (with a warning) after
// nv_kthread_q_stop has begun shutting the queue down.
void nv_kthread_q_flush(nv_kthread_q_t *q)
{
    if (likely(!atomic_read(&q->main_loop_should_exit))) {
        // Flushing twice is deliberate, not a typo: an item that
        // reschedules itself during the first pass is caught and run to
        // completion by the second pass.
        _raw_q_flush(q);
        _raw_q_flush(q);
        return;
    }

    NVQ_WARN("Not allowed: nv_kthread_q_flush was called after "
             "nv_kthread_q_stop. q: 0x%p\n", q);
}

File diff suppressed because it is too large. [Load Diff]

View File

@@ -0,0 +1,363 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* Define the entry points which the NVKMS kernel interface layer
* provides to core NVKMS.
*/
#if !defined(_NVIDIA_MODESET_OS_INTERFACE_H_)
#define _NVIDIA_MODESET_OS_INTERFACE_H_
#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
#include <linux/stddef.h> /* size_t */
#else
#include <stddef.h> /* size_t */
#endif
#include "nvtypes.h" /* NvU8 */
#include "nvkms.h"
#include "nv_stdarg.h"
/*!
 * Sync point operations dispatched through nvkms_syncpt_op(). Each
 * operation reads/writes the like-named member of NvKmsSyncPtOpParams.
 */
enum NvKmsSyncPtOp {
    NVKMS_SYNCPT_OP_ALLOC,
    NVKMS_SYNCPT_OP_GET,
    NVKMS_SYNCPT_OP_PUT,
    NVKMS_SYNCPT_OP_INCR_MAX,
    NVKMS_SYNCPT_OP_CPU_INCR,
    NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH,
    NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD,
    NVKMS_SYNCPT_OP_READ_MINVAL,
    NVKMS_SYNCPT_OP_READ_MAXVAL,
    NVKMS_SYNCPT_OP_SET_MIN_EQ_MAX,
    NVKMS_SYNCPT_OP_SET_MAXVAL,
};
/*!
 * Per-operation parameters for nvkms_syncpt_op(). Only the member
 * matching the enum NvKmsSyncPtOp value passed in is used; "in" fields
 * are read by the call, "out" fields are written by it.
 */
typedef struct {
    struct {
        const char *syncpt_name; /* in */
        NvU32 id;                /* out */
    } alloc;

    struct {
        NvU32 id;                /* in */
    } get;

    struct {
        NvU32 id;                /* in */
    } put;

    struct {
        NvU32 id;                /* in */
        NvU32 incr;              /* in */
        NvU32 value;             /* out */
    } incr_max;

    struct {
        NvU32 id;                /* in */
    } cpu_incr;

    struct {
        NvS32 fd;                /* in */
        NvU32 id;                /* out */
        NvU32 thresh;            /* out */
    } fd_to_id_and_thresh;

    struct {
        NvU32 id;                /* in */
        NvU32 thresh;            /* in */
        NvS32 fd;                /* out */
    } id_and_thresh_to_fd;

    struct {
        NvU32 id;                /* in */
        NvU32 minval;            /* out */
    } read_minval;

    struct {
        NvU32 id;                /* in */
        NvU32 maxval;            /* out */
    } read_maxval;

    struct {
        NvU32 id;                /* in */
    } set_min_eq_max;

    struct {
        NvU32 id;                /* in */
        NvU32 val;               /* in */
    } set_maxval;
} NvKmsSyncPtOpParams;
void nvkms_call_rm (void *ops);
void* nvkms_alloc (size_t size,
NvBool zero);
void nvkms_free (void *ptr,
size_t size);
void* nvkms_memset (void *ptr,
NvU8 c,
size_t size);
void* nvkms_memcpy (void *dest,
const void *src,
size_t n);
void* nvkms_memmove (void *dest,
const void *src,
size_t n);
int nvkms_memcmp (const void *s1,
const void *s2,
size_t n);
size_t nvkms_strlen (const char *s);
int nvkms_strcmp (const char *s1,
const char *s2);
char* nvkms_strncpy (char *dest,
const char *src,
size_t n);
void nvkms_usleep (NvU64 usec);
NvU64 nvkms_get_usec (void);
int nvkms_copyin (void *kptr,
NvU64 uaddr,
size_t n);
int nvkms_copyout (NvU64 uaddr,
const void *kptr,
size_t n);
void nvkms_yield (void);
void nvkms_dump_stack (void);
NvBool nvkms_syncpt_op (enum NvKmsSyncPtOp op,
NvKmsSyncPtOpParams *params);
int nvkms_snprintf (char *str,
size_t size,
const char *format, ...)
__attribute__((format (printf, 3, 4)));
int nvkms_vsnprintf (char *str,
size_t size,
const char *format,
va_list ap);
#define NVKMS_LOG_LEVEL_INFO 0
#define NVKMS_LOG_LEVEL_WARN 1
#define NVKMS_LOG_LEVEL_ERROR 2
void nvkms_log (const int level,
const char *gpuPrefix,
const char *msg);
/*!
* Refcounted pointer to an object that may be freed while references still
* exist.
*
* This structure is intended to be used for nvkms timers to refer to objects
* that may be freed while timers with references to the object are still
* pending.
*
* When the owner of an nvkms_ref_ptr is freed, the teardown code should call
* nvkms_free_ref_ptr(). That marks the pointer as invalid so that later calls
* to nvkms_dec_ref() (i.e. from a workqueue callback) return NULL rather than
* the pointer originally passed to nvkms_alloc_ref_ptr().
*/
struct nvkms_ref_ptr;
/*!
* Allocate and initialize a ref_ptr.
*
* The pointer stored in the ref_ptr is initialized to ptr, and its refcount is
* initialized to 1.
*/
struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr);
/*!
* Clear a ref_ptr.
*
* This function sets the pointer stored in the ref_ptr to NULL and drops the
* reference created by nvkms_alloc_ref_ptr(). This function should be called
* when the object pointed to by the ref_ptr is freed.
*
* A caller should make sure that no code that can call nvkms_inc_ref() can
* execute after nvkms_free_ref_ptr() is called.
*/
void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr);
/*!
* Increment the refcount of a ref_ptr.
*
* This function should be used when a pointer to the ref_ptr is stored
* somewhere. For example, when the ref_ptr is used as the argument to
* nvkms_alloc_timer.
*
* This may be called outside of the nvkms_lock, for example by an RM callback.
*/
void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr);
/*!
* Decrement the refcount of a ref_ptr and extract the embedded pointer.
*
* This should be used by code that needs to atomically determine whether the
* object pointed to by the ref_ptr still exists. To prevent the object from
* being destroyed while the current thread is executing, this should be called
* from inside the nvkms_lock.
*/
void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr);
typedef void nvkms_timer_proc_t(void *dataPtr, NvU32 dataU32);
typedef struct nvkms_timer_t nvkms_timer_handle_t;
/*!
* Schedule a callback function to be called in the future.
*
* The callback function 'proc' will be called with the arguments
* 'dataPtr' and 'dataU32' at 'usec' (or later) microseconds from now.
* If usec==0, the callback will be scheduled to be called as soon as
* possible.
*
* The callback function is guaranteed to be called back with the
* nvkms_lock held, and in process context.
*
* Returns an opaque handle, nvkms_timer_handle_t*, or NULL on
* failure. If non-NULL, the caller is responsible for caching the
* handle and eventually calling nvkms_free_timer() to free the
* memory.
*
* The nvkms_lock may be held when nvkms_alloc_timer() is called, but
* the nvkms_lock is not required.
*/
nvkms_timer_handle_t* nvkms_alloc_timer (nvkms_timer_proc_t *proc,
void *dataPtr, NvU32 dataU32,
NvU64 usec);
/*!
* Schedule a callback function to be called in the future.
*
* This function is like nvkms_alloc_timer() except that instead of returning a
* pointer to a structure that the caller should free later, the timer will free
* itself after executing the callback function. This is only intended for
* cases where the caller cannot cache the nvkms_alloc_timer() return value.
*/
NvBool
nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc,
struct nvkms_ref_ptr *ref_ptr,
NvU32 dataU32, NvU64 usec);
/*!
* Free the nvkms_timer_t object. If the callback function has not
* yet been called, freeing the nvkms_timer_handle_t will guarantee
* that it is not called.
*
* The nvkms_lock must be held when calling nvkms_free_timer().
*/
void nvkms_free_timer (nvkms_timer_handle_t *handle);
/*!
* Notify the NVKMS kernel interface that the event queue has changed.
*
* \param[in] pOpenKernel This indicates the file descriptor
* ("per-open") of the client whose event queue
* has been updated. This is the pointer
* passed by the kernel interface to nvKmsOpen().
* \param[in] eventsAvailable If TRUE, a new event has been added to the
* event queue. If FALSE, the last event has
* been removed from the event queue.
*/
void
nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel,
NvBool eventsAvailable);
/*!
* Get the "per-open" data (the pointer returned by nvKmsOpen())
* associated with this fd.
*/
void* nvkms_get_per_open_data(int fd);
/*!
* Raise and lower the reference count of the specified GPU.
*/
NvBool nvkms_open_gpu(NvU32 gpuId);
void nvkms_close_gpu(NvU32 gpuId);
/*!
* Enumerate nvidia gpus.
*/
NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info);
/*!
* Availability of write combining support for video memory.
*/
NvBool nvkms_allow_write_combining(void);
/*!
* Checks whether the fd is associated with an nvidia character device.
*/
NvBool nvkms_fd_is_nvidia_chardev(int fd);
/*!
* NVKMS interface for kernel space NVKMS clients like KAPI
*/
struct nvkms_per_open;
struct nvkms_per_open* nvkms_open_from_kapi
(
struct NvKmsKapiDevice *device
);
void nvkms_close_from_kapi(struct nvkms_per_open *popen);
NvBool nvkms_ioctl_from_kapi
(
struct nvkms_per_open *popen,
NvU32 cmd, void *params_address, const size_t params_size
);
/*!
* APIs for locking.
*/
typedef struct nvkms_sema_t nvkms_sema_handle_t;
nvkms_sema_handle_t*
nvkms_sema_alloc (void);
void nvkms_sema_free (nvkms_sema_handle_t *sema);
void nvkms_sema_down (nvkms_sema_handle_t *sema);
void nvkms_sema_up (nvkms_sema_handle_t *sema);
/*!
* APIs to register/unregister backlight device.
*/
struct nvkms_backlight_device;
struct nvkms_backlight_device*
nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv,
NvU32 current_brightness);
void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd);
#endif /* _NVIDIA_MODESET_OS_INTERFACE_H_ */

View File

@@ -0,0 +1,99 @@
###########################################################################
# Kbuild fragment for nvidia-modeset.ko
###########################################################################
#
# Define NVIDIA_MODESET_{SOURCES,OBJECTS}
#
NVIDIA_MODESET_SOURCES = nvidia-modeset/nvidia-modeset-linux.c
NVIDIA_MODESET_SOURCES += nvidia-modeset/nv-kthread-q.c
NVIDIA_MODESET_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_MODESET_SOURCES))
obj-m += nvidia-modeset.o
nvidia-modeset-y := $(NVIDIA_MODESET_OBJECTS)
NVIDIA_MODESET_KO = nvidia-modeset/nvidia-modeset.ko
NV_KERNEL_MODULE_TARGETS += $(NVIDIA_MODESET_KO)
#
# nv-modeset-kernel.o_binary is the core binary component of nvidia-modeset.ko,
# shared across all UNIX platforms. Create a symlink, "nv-modeset-kernel.o"
# that points to nv-modeset-kernel.o_binary, and add nv-modeset-kernel.o to the
# list of objects to link into nvidia-modeset.ko.
#
# Note that:
# - The kbuild "clean" rule will delete all objects in nvidia-modeset-y (which
# is why we use a symlink instead of just adding nv-modeset-kernel.o_binary
# to nvidia-modeset-y).
# - kbuild normally uses the naming convention of ".o_shipped" for
# binary files. That is not used here, because the kbuild rule to
# create the "normal" object file from ".o_shipped" does a copy, not
# a symlink. This file is quite large, so a symlink is preferred.
# - The file added to nvidia-modeset-y should be relative to gmake's cwd.
# But, the target for the symlink rule should be prepended with $(obj).
#
NVIDIA_MODESET_BINARY_OBJECT := $(src)/nvidia-modeset/nv-modeset-kernel.o_binary
NVIDIA_MODESET_BINARY_OBJECT_O := nvidia-modeset/nv-modeset-kernel.o
quiet_cmd_symlink = SYMLINK $@
cmd_symlink = ln -sf $< $@
targets += $(NVIDIA_MODESET_BINARY_OBJECT_O)
$(obj)/$(NVIDIA_MODESET_BINARY_OBJECT_O): $(NVIDIA_MODESET_BINARY_OBJECT) FORCE
$(call if_changed,symlink)
nvidia-modeset-y += $(NVIDIA_MODESET_BINARY_OBJECT_O)
#
# Define nvidia-modeset.ko-specific CFLAGS.
#
NVIDIA_MODESET_CFLAGS += -I$(src)/nvidia-modeset
NVIDIA_MODESET_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0
$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_MODESET_OBJECTS), $(NVIDIA_MODESET_CFLAGS))
#
# Build nv-modeset-interface.o from the kernel interface layer
# objects, suitable for further processing by the installer and
# inclusion as a precompiled kernel interface file.
#
NVIDIA_MODESET_INTERFACE := nvidia-modeset/nv-modeset-interface.o
# Linux kernel v5.12 and later looks at "always-y"; Linux kernel versions
# before v5.6 look at "always"; kernel versions between v5.6 and v5.12
# look at both.
always += $(NVIDIA_MODESET_INTERFACE)
always-y += $(NVIDIA_MODESET_INTERFACE)
$(obj)/$(NVIDIA_MODESET_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_MODESET_OBJECTS))
$(LD) -r -o $@ $^
#
# Register the conftests needed by nvidia-modeset.ko
#
NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_MODESET_OBJECTS)
NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations
NV_CONFTEST_TYPE_COMPILE_TESTS += node_states_n_memory
NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64
NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data
NV_CONFTEST_FUNCTION_COMPILE_TESTS += proc_remove
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kthread_create_on_node
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_kthread_create_on_node

View File

@@ -0,0 +1,73 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#if !defined(NVKMS_IOCTL_H)
#define NVKMS_IOCTL_H
#include "nvtypes.h"
/*!
* Some of the NVKMS ioctl parameter data structures are quite large
* and would exceed the parameter size constraints on at least SunOS.
*
* Redirect ioctls through a level of indirection: user-space assigns
* NvKmsIoctlParams with the real command, size, and pointer, and
* passes the NvKmsIoctlParams through the ioctl.
*/
// Indirection record passed through the single NVKMS ioctl: identifies
// the real command, the size of its parameter struct, and the user-space
// address of that struct.
struct NvKmsIoctlParams {
    NvU32 cmd;                       // real NVKMS command number
    NvU32 size;                      // size, in bytes, of the struct at 'address'
    NvU64 address NV_ALIGN_BYTES(8); // user-space pointer to the real params
};
#define NVKMS_IOCTL_MAGIC 'm'
#define NVKMS_IOCTL_CMD 0
#define NVKMS_IOCTL_IOWR \
_IOWR(NVKMS_IOCTL_MAGIC, NVKMS_IOCTL_CMD, struct NvKmsIoctlParams)
/*!
* User-space pointers are always passed to NVKMS in an NvU64.
* This user-space address is eventually passed into the platform's
* copyin/copyout functions, in a void* argument.
*
* This utility function converts from an NvU64 to a pointer.
*/
/*!
 * Convert a user-supplied NvU64 address into a kernel void* by narrowing
 * through the pointer-sized integer type NvUPtr.
 */
static inline void *nvKmsNvU64ToPointer(NvU64 value)
{
    NvUPtr narrowed = (NvUPtr)value;

    return (void *)narrowed;
}
/*!
* Before casting the NvU64 to a void*, check that casting to a pointer
* size within the kernel does not lose any precision in the current
* environment.
*/
/*!
 * Report whether the NvU64 address survives a round trip through the
 * pointer-sized integer type NvUPtr, i.e. whether casting it to a kernel
 * pointer would lose precision in the current environment.
 */
static inline NvBool nvKmsNvU64AddressIsSafe(NvU64 address)
{
    const NvU64 roundTripped = (NvU64)(NvUPtr)address;

    return roundTripped == address;
}
#endif /* NVKMS_IOCTL_H */

View File

@@ -0,0 +1,90 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_KMS_H__
#define __NV_KMS_H__
#include "nvtypes.h"
#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
#include <linux/stddef.h> /* size_t */
#else
#include <stddef.h> /* size_t */
#endif
#include "nvkms-kapi.h"
typedef struct nvkms_per_open nvkms_per_open_handle_t;
typedef void nvkms_procfs_out_string_func_t(void *data,
const char *str);
typedef void nvkms_procfs_proc_t(void *data,
char *buffer, size_t size,
nvkms_procfs_out_string_func_t *outString);
typedef struct {
const char *name;
nvkms_procfs_proc_t *func;
} nvkms_procfs_file_t;
enum NvKmsClientType {
NVKMS_CLIENT_USER_SPACE,
NVKMS_CLIENT_KERNEL_SPACE,
};
NvBool nvKmsIoctl(
void *pOpenVoid,
NvU32 cmd,
NvU64 paramsAddress,
const size_t paramSize);
void nvKmsClose(void *pOpenVoid);
void* nvKmsOpen(
NvU32 pid,
enum NvKmsClientType clientType,
nvkms_per_open_handle_t *pOpenKernel);
NvBool nvKmsModuleLoad(void);
void nvKmsModuleUnload(void);
void nvKmsSuspend(NvU32 gpuId);
void nvKmsResume(NvU32 gpuId);
void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles);
void nvKmsKapiHandleEventQueueChange
(
struct NvKmsKapiDevice *device
);
NvBool nvKmsKapiGetFunctionsTableInternal
(
struct NvKmsKapiFunctionsTable *funcsTable
);
NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness);
NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness);
#endif /* __NV_KMS_H__ */