Mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git
Synced 2026-01-27 19:49:47 +00:00

Compare commits

34 commits:
- 346011d45c
- 7a6a5a1f9a
- 7d3cbfe254
- 9940d2229a
- 5e52edb203
- 2cca8b3fd5
- caa2dd11a0
- e45d91de02
- 083cd9cf17
- ea4c27fad6
- 3bf16b890c
- 12933b2d3c
- 476bd34534
- 91676d6628
- bb2dac1f20
- 4c29105335
- be3cd9abcb
- a2f89d6b59
- f364378a65
- b5bf85a8e3
- f59818b751
- a8e01be6b2
- 12c0739352
- 29f830f1bb
- 337e28efda
- 22a077c4fe
- 26458140be
- eb5c7665a1
- 6dd092ddb7
- 4397463e73
- e598191e8e
- 1dc88ff75e
- 811073c51e
- dac2350c7f
.github/ISSUE_TEMPLATE/10_functional_bug.yml (vendored, 26 lines changed)

@@ -1,5 +1,8 @@
 name: Report a functional bug 🐛
-description: Functional bugs affect operation or stability of the driver and/or hardware.
+description: |
+  Functional bugs affect operation or stability of the driver or hardware.
+
+  Bugs with the closed source driver must be reported on the forums (see link on New Issue page below).
 labels:
   - "bug"
 body:
@@ -18,14 +21,12 @@ body:
       description: "Which open-gpu-kernel-modules version are you running? Be as specific as possible: SHA is best when built from specific commit."
     validations:
       required: true
-  - type: dropdown
+  - type: checkboxes
     id: sw_driver_proprietary
     attributes:
-      label: "Does this happen with the proprietary driver (of the same version) as well?"
+      label: "Please confirm this issue does not happen with the proprietary driver (of the same version). This issue tracker is only for bugs specific to the open kernel driver."
       options:
-        - "Yes"
-        - "No"
-        - "I cannot test this"
+        - label: "I confirm that this does not happen with the proprietary driver package."
     validations:
       required: true
   - type: input
@@ -42,6 +43,14 @@ body:
      description: "Which kernel are you running? (output of `uname -a`, say if you built it yourself)"
     validations:
       required: true
+  - type: checkboxes
+    id: sw_host_kernel_stable
+    attributes:
+      label: "Please confirm you are running a stable release kernel (e.g. not a -rc). We do not accept bug reports for unreleased kernels."
+      options:
+        - label: "I am running on a stable kernel release."
+    validations:
+      required: true
   - type: input
     id: hw_gpu_type
     attributes:
@@ -78,7 +87,10 @@ body:
     id: bug_report_gz
     attributes:
       label: nvidia-bug-report.log.gz
-      description: "Please reproduce the problem, after that run `nvidia-bug-report.sh`, and attach the resulting nvidia-bug-report.log.gz here."
+      description: |
+        Please reproduce the problem, after that run `nvidia-bug-report.sh`, and attach the resulting nvidia-bug-report.log.gz here.
+
+        Reports without this file will be closed.
       placeholder: You can usually just drag & drop the file into this textbox.
     validations:
       required: true
.github/ISSUE_TEMPLATE/20_build_bug.yml (vendored, 8 lines changed)

@@ -32,6 +32,14 @@ body:
      description: "Which kernel are you running? (output of `uname -a`, say if you built it yourself)."
     validations:
       required: true
+  - type: checkboxes
+    id: sw_host_kernel_stable
+    attributes:
+      label: "Please confirm you are running a stable release kernel (e.g. not a -rc). We do not accept bug reports for unreleased kernels."
+      options:
+        - label: "I am running on a stable kernel release."
+    validations:
+      required: true
   - type: textarea
     id: bug_description
     attributes:
.github/ISSUE_TEMPLATE/config.yml (vendored, 6 lines changed)

@@ -1,14 +1,14 @@
 blank_issues_enabled: false
 contact_links:
-  - name: Report a bug with the proprietary driver
-    url: https://forums.developer.nvidia.com/c/gpu-graphics/linux/148
-    about: Bugs that aren't specific to the open source driver in this repository must be reported with the linked forums instead.
   - name: Report a cosmetic issue
     url: https://github.com/NVIDIA/open-gpu-kernel-modules/discussions/categories/general
     about: We are not currently accepting cosmetic-only changes such as whitespace, typos, or simple renames. You can still discuss and collect them on the boards.
   - name: Ask a question
     url: https://github.com/NVIDIA/open-gpu-kernel-modules/discussions/categories/q-a
     about: Unsure of what to click, where to go, what the process for your thing is? We're happy to help. Click to visit the discussion board and say hello!
+  - name: Report a bug with the proprietary driver
+    url: https://forums.developer.nvidia.com/c/gpu-graphics/linux/148
+    about: Bugs that aren't specific to the open source driver in this repository should be reported with the linked forums instead. If you are unsure on what kind of bug you have, feel free to open a thread in Discussions. We're here to help!
   - name: Suggest a feature
     url: https://github.com/NVIDIA/open-gpu-kernel-modules/discussions/categories/ideas
     about: Please do not open Issues for feature requests; instead, suggest and discuss new features on the Github discussion board. If you have a feature you worked on and want to PR it, please also open a discussion before doing so.
CHANGELOG.md (96 lines changed)

@@ -1,96 +0,0 @@
-# Changelog
-
-## Release 525 Entries
-
-### [525.60.13] 2022-12-05
-
-### [525.60.11] 2022-11-28
-
-### [525.53] 2022-11-10
-
-#### Changed
-
-- GSP firmware is now distributed as multiple firmware files: this release has `gsp_tu10x.bin` and `gsp_ad10x.bin` replacing `gsp.bin` from previous releases.
-  - Each file is named after a GPU architecture and supports GPUs from one or more architectures. This allows GSP firmware to better leverage each architecture's capabilities.
-  - The .run installer will continue to install firmware to `/lib/firmware/nvidia/<version>` and the `nvidia.ko` kernel module will load the appropriate firmware for each GPU at runtime.
-
-#### Fixed
-
-- Add support for IBT (indirect branch tracking) on supported platforms, [#256](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/256) by @rnd-ash
-- Return EINVAL when [failing to] allocating memory, [#280](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/280) by @YusufKhan-gamedev
-- Fix various typos in nvidia/src/kernel, [#16](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/16) by @alexisgeoffrey
-- Added support for rotation in X11, Quadro Sync, Stereo, and YUV 4:2:0 on Turing.
-
-## Release 520 Entries
-
-### [520.61.07] 2022-10-20
-
-### [520.56.06] 2022-10-12
-
-#### Added
-
-- Introduce support for GeForce RTX 4090 GPUs.
-
-### [520.61.05] 2022-10-10
-
-#### Added
-
-- Introduce support for NVIDIA H100 GPUs.
-
-#### Fixed
-
-- Fix/Improve Makefile, [#308](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/308/) by @izenynn
-- Make nvLogBase2 more efficient, [#177](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/177/) by @DMaroo
-- nv-pci: fixed always true expression, [#195](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/195/) by @ValZapod
-
-## Release 515 Entries
-
-### [515.76] 2022-09-20
-
-#### Fixed
-
-- Improved compatibility with new Linux kernel releases
-- Fixed possible excessive GPU power draw on an idle X11 or Wayland desktop when driving high resolutions or refresh rates
-
-### [515.65.07] 2022-10-19
-
-### [515.65.01] 2022-08-02
-
-#### Fixed
-
-- Collection of minor fixes to issues, [#6](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/61) by @Joshua-Ashton
-- Remove unnecessary use of acpi_bus_get_device().
-
-### [515.57] 2022-06-28
-
-#### Fixed
-
-- Backtick is deprecated, [#273](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/273) by @arch-user-france1
-
-### [515.48.07] 2022-05-31
-
-#### Added
-
-- List of compatible GPUs in README.md.
-
-#### Fixed
-
-- Fix various README capitalizations, [#8](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/8) by @27lx
-- Automatically tag bug report issues, [#15](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/15) by @thebeanogamer
-- Improve conftest.sh Script, [#37](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/37) by @Nitepone
-- Update HTTP link to HTTPS, [#101](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/101) by @alcaparra
-- moved array sanity check to before the array access, [#117](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/117) by @RealAstolfo
-- Fixed some typos, [#122](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/122) by @FEDOyt
-- Fixed capitalization, [#123](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/123) by @keroeslux
-- Fix typos in NVDEC Engine Descriptor, [#126](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/126) from @TrickyDmitriy
-- Extranous apostrohpes in a makefile script [sic], [#14](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/14) by @kiroma
-- HDMI no audio @ 4K above 60Hz, [#75](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/75) by @adolfotregosa
-- dp_configcaps.cpp:405: array index sanity check in wrong place?, [#110](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/110) by @dcb314
-- NVRM kgspInitRm_IMPL: missing NVDEC0 engine, cannot initialize GSP-RM, [#116](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/116) by @kfazz
-- ERROR: modpost: "backlight_device_register" [...nvidia-modeset.ko] undefined, [#135](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/135) by @sndirsch
-- aarch64 build fails, [#151](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/151) by @frezbo
-
-### [515.43.04] 2022-05-11
-
-- Initial release.
README.md (163 lines changed)

@@ -1,7 +1,7 @@
 # NVIDIA Linux Open GPU Kernel Module Source
 
 This is the source release of the NVIDIA Linux open GPU kernel modules,
-version 525.60.13.
+version 550.142.
 
 
 ## How to Build
@@ -17,7 +17,7 @@ as root:
 
 Note that the kernel modules built here must be used with GSP
 firmware and user-space NVIDIA GPU driver components from a corresponding
-525.60.13 driver release. This can be achieved by installing
+550.142 driver release. This can be achieved by installing
 the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
 option. E.g.,
 
@@ -162,20 +162,36 @@ for the target kernel.
 - `src/nvidia/` The OS-agnostic code for nvidia.ko
 - `src/nvidia-modeset/` The OS-agnostic code for nvidia-modeset.ko
 - `src/common/` Utility code used by one or more of nvidia.ko and nvidia-modeset.ko
+- `nouveau/` Tools for integration with the Nouveau device driver
 
 
+## Nouveau device driver integration
+
+The Python script in the 'nouveau' directory is used to extract some of the
+firmware binary images (and related data) encoded in the source code and
+store them as distinct files. These files are used by the Nouveau device
+driver to load and communicate with the GSP firmware.
+
+The layout of the binary files is described in nouveau_firmware_layout.ods,
+which is an OpenDocument Spreadsheet file, compatible with most spreadsheet
+software applications.
+
+
 ## Compatible GPUs
 
-The open-gpu-kernel-modules can be used on any Turing or later GPU
-(see the table below). However, in the 525.60.13 release,
-GeForce and Workstation support is still considered alpha-quality.
+The NVIDIA open kernel modules can be used on any Turing or later GPU
+(see the table below). However, in the __DRIVER_VERION__ release, GeForce and
+Workstation support is considered to be Beta quality. The open kernel modules
+are suitable for broad usage, and NVIDIA requests feedback on any issues
+encountered specific to them.
 
-To enable use of the open kernel modules on GeForce and Workstation GPUs,
-set the "NVreg_OpenRmEnableUnsupportedGpus" nvidia.ko kernel module
-parameter to 1. For more details, see the NVIDIA GPU driver end user
-README here:
+For details on feature support and limitations, see the NVIDIA GPU driver
+end user README here:
 
-https://us.download.nvidia.com/XFree86/Linux-x86_64/525.60.13/README/kernel_open.html
+https://us.download.nvidia.com/XFree86/Linux-x86_64/550.142/README/kernel_open.html
 
+For vGPU support, please refer to the README.vgpu packaged in the vGPU Host
+Package for more details.
+
 In the below table, if three IDs are listed, the first is the PCI Device
 ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI
@@ -635,9 +651,12 @@ Subsystem Device ID.
 | NVIDIA T1000 8GB | 1FF0 17AA 1612 |
 | NVIDIA T400 4GB | 1FF2 1028 1613 |
 | NVIDIA T400 4GB | 1FF2 103C 1613 |
+| NVIDIA T400E | 1FF2 103C 18FF |
 | NVIDIA T400 4GB | 1FF2 103C 8A80 |
 | NVIDIA T400 4GB | 1FF2 10DE 1613 |
+| NVIDIA T400E | 1FF2 10DE 18FF |
 | NVIDIA T400 4GB | 1FF2 17AA 1613 |
+| NVIDIA T400E | 1FF2 17AA 18FF |
 | Quadro T1000 | 1FF9 |
 | NVIDIA A100-SXM4-40GB | 20B0 |
 | NVIDIA A100-PG509-200 | 20B0 10DE 1450 |
@@ -645,12 +664,16 @@ Subsystem Device ID.
 | NVIDIA A100-SXM4-80GB | 20B2 10DE 147F |
 | NVIDIA A100-SXM4-80GB | 20B2 10DE 1622 |
 | NVIDIA A100-SXM4-80GB | 20B2 10DE 1623 |
-| NVIDIA PG506-242 | 20B3 10DE 14A7 |
-| NVIDIA PG506-243 | 20B3 10DE 14A8 |
+| NVIDIA PG509-210 | 20B2 10DE 1625 |
+| NVIDIA A100-SXM-64GB | 20B3 10DE 14A7 |
+| NVIDIA A100-SXM-64GB | 20B3 10DE 14A8 |
 | NVIDIA A100 80GB PCIe | 20B5 10DE 1533 |
 | NVIDIA A100 80GB PCIe | 20B5 10DE 1642 |
 | NVIDIA PG506-232 | 20B6 10DE 1492 |
 | NVIDIA A30 | 20B7 10DE 1532 |
+| NVIDIA A30 | 20B7 10DE 1804 |
+| NVIDIA A30 | 20B7 10DE 1852 |
+| NVIDIA A800-SXM4-40GB | 20BD 10DE 17F4 |
 | NVIDIA A100-PCIE-40GB | 20F1 10DE 145F |
 | NVIDIA A800-SXM4-80GB | 20F3 10DE 179B |
 | NVIDIA A800-SXM4-80GB | 20F3 10DE 179C |
@@ -662,6 +685,11 @@ Subsystem Device ID.
 | NVIDIA A800-SXM4-80GB | 20F3 10DE 17A2 |
 | NVIDIA A800 80GB PCIe | 20F5 10DE 1799 |
 | NVIDIA A800 80GB PCIe LC | 20F5 10DE 179A |
+| NVIDIA A800 40GB Active | 20F6 1028 180A |
+| NVIDIA A800 40GB Active | 20F6 103C 180A |
+| NVIDIA A800 40GB Active | 20F6 10DE 180A |
+| NVIDIA A800 40GB Active | 20F6 17AA 180A |
+| NVIDIA AX800 | 20FD 10DE 17F8 |
 | NVIDIA GeForce GTX 1660 Ti | 2182 |
 | NVIDIA GeForce GTX 1660 | 2184 |
 | NVIDIA GeForce GTX 1650 SUPER | 2187 |
@@ -720,7 +748,23 @@ Subsystem Device ID.
 | NVIDIA A10 | 2236 10DE 1482 |
 | NVIDIA A10G | 2237 10DE 152F |
 | NVIDIA A10M | 2238 10DE 1677 |
+| NVIDIA H100 NVL | 2321 10DE 1839 |
+| NVIDIA H800 PCIe | 2322 10DE 17A4 |
+| NVIDIA H800 | 2324 10DE 17A6 |
+| NVIDIA H800 | 2324 10DE 17A8 |
+| NVIDIA H20 | 2329 10DE 198B |
+| NVIDIA H20 | 2329 10DE 198C |
+| NVIDIA H20-3e | 232C 10DE 2063 |
+| NVIDIA H100 80GB HBM3 | 2330 10DE 16C0 |
+| NVIDIA H100 80GB HBM3 | 2330 10DE 16C1 |
 | NVIDIA H100 PCIe | 2331 10DE 1626 |
+| NVIDIA H200 | 2335 10DE 18BE |
+| NVIDIA H200 | 2335 10DE 18BF |
+| NVIDIA H100 | 2339 10DE 17FC |
+| NVIDIA H800 NVL | 233A 10DE 183A |
+| NVIDIA GH200 120GB | 2342 10DE 16EB |
+| NVIDIA GH200 120GB | 2342 10DE 1805 |
+| NVIDIA GH200 480GB | 2342 10DE 1809 |
 | NVIDIA GeForce RTX 3060 Ti | 2414 |
 | NVIDIA GeForce RTX 3080 Ti Laptop GPU | 2420 |
 | NVIDIA RTX A5500 Laptop GPU | 2438 |
@@ -748,6 +792,7 @@ Subsystem Device ID.
 | NVIDIA RTX A3000 12GB Laptop GPU | 24B9 |
 | NVIDIA RTX A4500 Laptop GPU | 24BA |
 | NVIDIA RTX A3000 12GB Laptop GPU | 24BB |
+| NVIDIA GeForce RTX 3060 | 24C7 |
 | NVIDIA GeForce RTX 3060 Ti | 24C9 |
 | NVIDIA GeForce RTX 3080 Laptop GPU | 24DC |
 | NVIDIA GeForce RTX 3070 Laptop GPU | 24DD |
@@ -771,6 +816,8 @@ Subsystem Device ID.
 | NVIDIA RTX A2000 12GB | 2571 103C 1611 |
 | NVIDIA RTX A2000 12GB | 2571 10DE 1611 |
 | NVIDIA RTX A2000 12GB | 2571 17AA 1611 |
+| NVIDIA GeForce RTX 3050 | 2582 |
+| NVIDIA GeForce RTX 3050 | 2584 |
 | NVIDIA GeForce RTX 3050 Ti Laptop GPU | 25A0 |
 | NVIDIA GeForce RTX 3050Ti Laptop GPU | 25A0 103C 8928 |
 | NVIDIA GeForce RTX 3050Ti Laptop GPU | 25A0 103C 89F9 |
@@ -783,15 +830,107 @@ Subsystem Device ID.
 | NVIDIA GeForce RTX 2050 | 25A7 |
 | NVIDIA GeForce RTX 2050 | 25A9 |
 | NVIDIA GeForce MX570 A | 25AA |
+| NVIDIA GeForce RTX 3050 4GB Laptop GPU | 25AB |
+| NVIDIA GeForce RTX 3050 6GB Laptop GPU | 25AC |
+| NVIDIA GeForce RTX 2050 | 25AD |
+| NVIDIA RTX A1000 | 25B0 1028 1878 |
+| NVIDIA RTX A1000 | 25B0 103C 1878 |
+| NVIDIA RTX A1000 | 25B0 10DE 1878 |
+| NVIDIA RTX A1000 | 25B0 17AA 1878 |
+| NVIDIA RTX A400 | 25B2 1028 1879 |
+| NVIDIA RTX A400 | 25B2 103C 1879 |
+| NVIDIA RTX A400 | 25B2 10DE 1879 |
+| NVIDIA RTX A400 | 25B2 17AA 1879 |
 | NVIDIA A16 | 25B6 10DE 14A9 |
 | NVIDIA A2 | 25B6 10DE 157E |
 | NVIDIA RTX A2000 Laptop GPU | 25B8 |
 | NVIDIA RTX A1000 Laptop GPU | 25B9 |
 | NVIDIA RTX A2000 8GB Laptop GPU | 25BA |
 | NVIDIA RTX A500 Laptop GPU | 25BB |
+| NVIDIA RTX A1000 6GB Laptop GPU | 25BC |
+| NVIDIA RTX A500 Laptop GPU | 25BD |
 | NVIDIA GeForce RTX 3050 Ti Laptop GPU | 25E0 |
 | NVIDIA GeForce RTX 3050 Laptop GPU | 25E2 |
 | NVIDIA GeForce RTX 3050 Laptop GPU | 25E5 |
+| NVIDIA GeForce RTX 3050 6GB Laptop GPU | 25EC |
+| NVIDIA GeForce RTX 2050 | 25ED |
+| NVIDIA RTX A1000 Embedded GPU | 25F9 |
+| NVIDIA RTX A2000 Embedded GPU | 25FA |
+| NVIDIA RTX A500 Embedded GPU | 25FB |
 | NVIDIA GeForce RTX 4090 | 2684 |
+| NVIDIA GeForce RTX 4090 D | 2685 |
+| NVIDIA RTX 6000 Ada Generation | 26B1 1028 16A1 |
+| NVIDIA RTX 6000 Ada Generation | 26B1 103C 16A1 |
+| NVIDIA RTX 6000 Ada Generation | 26B1 10DE 16A1 |
+| NVIDIA RTX 6000 Ada Generation | 26B1 17AA 16A1 |
+| NVIDIA RTX 5000 Ada Generation | 26B2 1028 17FA |
+| NVIDIA RTX 5000 Ada Generation | 26B2 103C 17FA |
+| NVIDIA RTX 5000 Ada Generation | 26B2 10DE 17FA |
+| NVIDIA RTX 5000 Ada Generation | 26B2 17AA 17FA |
+| NVIDIA RTX 5880 Ada Generation | 26B3 1028 1934 |
+| NVIDIA RTX 5880 Ada Generation | 26B3 103C 1934 |
+| NVIDIA RTX 5880 Ada Generation | 26B3 10DE 1934 |
+| NVIDIA RTX 5880 Ada Generation | 26B3 17AA 1934 |
+| NVIDIA L40 | 26B5 10DE 169D |
+| NVIDIA L40 | 26B5 10DE 17DA |
+| NVIDIA L40S | 26B9 10DE 1851 |
+| NVIDIA L40S | 26B9 10DE 18CF |
+| NVIDIA L20 | 26BA 10DE 1957 |
+| NVIDIA L20 | 26BA 10DE 1990 |
+| NVIDIA GeForce RTX 4080 SUPER | 2702 |
 | NVIDIA GeForce RTX 4080 | 2704 |
+| NVIDIA GeForce RTX 4070 Ti SUPER | 2705 |
+| NVIDIA GeForce RTX 4070 | 2709 |
 | NVIDIA GeForce RTX 4090 Laptop GPU | 2717 |
+| NVIDIA RTX 5000 Ada Generation Laptop GPU | 2730 |
+| NVIDIA GeForce RTX 4090 Laptop GPU | 2757 |
+| NVIDIA RTX 5000 Ada Generation Embedded GPU | 2770 |
+| NVIDIA GeForce RTX 4070 Ti | 2782 |
+| NVIDIA GeForce RTX 4070 SUPER | 2783 |
+| NVIDIA GeForce RTX 4070 | 2786 |
+| NVIDIA GeForce RTX 4060 Ti | 2788 |
+| NVIDIA GeForce RTX 4080 Laptop GPU | 27A0 |
+| NVIDIA RTX 4000 SFF Ada Generation | 27B0 1028 16FA |
+| NVIDIA RTX 4000 SFF Ada Generation | 27B0 103C 16FA |
+| NVIDIA RTX 4000 SFF Ada Generation | 27B0 10DE 16FA |
+| NVIDIA RTX 4000 SFF Ada Generation | 27B0 17AA 16FA |
+| NVIDIA RTX 4500 Ada Generation | 27B1 1028 180C |
+| NVIDIA RTX 4500 Ada Generation | 27B1 103C 180C |
+| NVIDIA RTX 4500 Ada Generation | 27B1 10DE 180C |
+| NVIDIA RTX 4500 Ada Generation | 27B1 17AA 180C |
+| NVIDIA RTX 4000 Ada Generation | 27B2 1028 181B |
+| NVIDIA RTX 4000 Ada Generation | 27B2 103C 181B |
+| NVIDIA RTX 4000 Ada Generation | 27B2 10DE 181B |
+| NVIDIA RTX 4000 Ada Generation | 27B2 17AA 181B |
+| NVIDIA L2 | 27B6 10DE 1933 |
+| NVIDIA L4 | 27B8 10DE 16CA |
+| NVIDIA L4 | 27B8 10DE 16EE |
+| NVIDIA RTX 4000 Ada Generation Laptop GPU | 27BA |
+| NVIDIA RTX 3500 Ada Generation Laptop GPU | 27BB |
+| NVIDIA GeForce RTX 4080 Laptop GPU | 27E0 |
+| NVIDIA RTX 3500 Ada Generation Embedded GPU | 27FB |
+| NVIDIA GeForce RTX 4060 Ti | 2803 |
+| NVIDIA GeForce RTX 4060 Ti | 2805 |
+| NVIDIA GeForce RTX 4060 | 2808 |
+| NVIDIA GeForce RTX 4070 Laptop GPU | 2820 |
+| NVIDIA GeForce RTX 3050 A Laptop GPU | 2822 |
+| NVIDIA RTX 3000 Ada Generation Laptop GPU | 2838 |
+| NVIDIA GeForce RTX 4070 Laptop GPU | 2860 |
+| NVIDIA GeForce RTX 4060 | 2882 |
+| NVIDIA GeForce RTX 4060 Laptop GPU | 28A0 |
+| NVIDIA GeForce RTX 4050 Laptop GPU | 28A1 |
+| NVIDIA GeForce RTX 3050 A Laptop GPU | 28A3 |
+| NVIDIA RTX 2000 Ada Generation | 28B0 1028 1870 |
+| NVIDIA RTX 2000 Ada Generation | 28B0 103C 1870 |
+| NVIDIA RTX 2000E Ada Generation | 28B0 103C 1871 |
+| NVIDIA RTX 2000 Ada Generation | 28B0 10DE 1870 |
+| NVIDIA RTX 2000E Ada Generation | 28B0 10DE 1871 |
+| NVIDIA RTX 2000 Ada Generation | 28B0 17AA 1870 |
+| NVIDIA RTX 2000E Ada Generation | 28B0 17AA 1871 |
+| NVIDIA RTX 2000 Ada Generation Laptop GPU | 28B8 |
+| NVIDIA RTX 1000 Ada Generation Laptop GPU | 28B9 |
+| NVIDIA RTX 500 Ada Generation Laptop GPU | 28BA |
+| NVIDIA RTX 500 Ada Generation Laptop GPU | 28BB |
+| NVIDIA GeForce RTX 4060 Laptop GPU | 28E0 |
+| NVIDIA GeForce RTX 4050 Laptop GPU | 28E1 |
+| NVIDIA RTX 2000 Ada Generation Embedded GPU | 28F8 |
@@ -57,6 +57,20 @@ ifeq ($(NV_UNDEF_BEHAVIOR_SANITIZER),1)
  UBSAN_SANITIZE := y
 endif
 
+#
+# Command to create a symbolic link, explicitly resolving the symlink target
+# to an absolute path to abstract away the difference between Linux < 6.13,
+# where the CWD is the Linux kernel source tree for Kbuild extmod builds, and
+# Linux >= 6.13, where the CWD is the external module source tree.
+#
+# This is used to create the nv*-kernel.o -> nv*-kernel.o_binary symlinks for
+# kernel modules which use precompiled binary object files.
+#
+quiet_cmd_symlink = SYMLINK $@
+      cmd_symlink = ln -sf $(abspath $<) $@
+
+
 $(foreach _module, $(NV_KERNEL_MODULES), \
  $(eval include $(src)/$(_module)/$(_module).Kbuild))
 
@@ -70,9 +84,25 @@ $(foreach _module, $(NV_KERNEL_MODULES), \
 
 EXTRA_CFLAGS += -I$(src)/common/inc
 EXTRA_CFLAGS += -I$(src)
-EXTRA_CFLAGS += -Wall -MD $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
+EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
 EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
-EXTRA_CFLAGS += -DNV_VERSION_STRING=\"525.60.13\"
+EXTRA_CFLAGS += -DNV_VERSION_STRING=\"550.142\"
+
+ifneq ($(SYSSRCHOST1X),)
+ EXTRA_CFLAGS += -I$(SYSSRCHOST1X)
+endif
+
+# Some Android kernels prohibit driver use of filesystem functions like
+# filp_open() and kernel_read(). Disable the NV_FILESYSTEM_ACCESS_AVAILABLE
+# functionality that uses those functions when building for Android.
+
+PLATFORM_IS_ANDROID ?= 0
+
+ifeq ($(PLATFORM_IS_ANDROID),1)
+ EXTRA_CFLAGS += -DNV_FILESYSTEM_ACCESS_AVAILABLE=0
+else
+ EXTRA_CFLAGS += -DNV_FILESYSTEM_ACCESS_AVAILABLE=1
+endif
 
 EXTRA_CFLAGS += -Wno-unused-function
 
@@ -87,7 +117,7 @@ ifeq ($(ARCH),arm64)
 endif
 
 ifeq ($(NV_BUILD_TYPE),debug)
- EXTRA_CFLAGS += -g -gsplit-dwarf
+ EXTRA_CFLAGS += -g
 endif
 
 EXTRA_CFLAGS += -ffreestanding
@@ -118,6 +148,16 @@ ifneq ($(wildcard /proc/sgi_uv),)
  EXTRA_CFLAGS += -DNV_CONFIG_X86_UV
 endif
 
+ifdef VGX_FORCE_VFIO_PCI_CORE
+ EXTRA_CFLAGS += -DNV_VGPU_FORCE_VFIO_PCI_CORE
+endif
+
+WARNINGS_AS_ERRORS ?=
+ifeq ($(WARNINGS_AS_ERRORS),1)
+ ccflags-y += -Werror
+else
+ ccflags-y += -Wno-error
+endif
+
 #
 # The conftest.sh script tests various aspects of the target kernel.
@@ -144,6 +184,9 @@ NV_CONFTEST_CMD := /bin/sh $(NV_CONFTEST_SCRIPT) \
 NV_CFLAGS_FROM_CONFTEST := $(shell $(NV_CONFTEST_CMD) build_cflags)
 
 NV_CONFTEST_CFLAGS = $(NV_CFLAGS_FROM_CONFTEST) $(EXTRA_CFLAGS) -fno-pie
+NV_CONFTEST_CFLAGS += $(call cc-disable-warning,pointer-sign)
+NV_CONFTEST_CFLAGS += $(call cc-option,-fshort-wchar,)
+NV_CONFTEST_CFLAGS += -Wno-error
 
 NV_CONFTEST_COMPILE_TEST_HEADERS := $(obj)/conftest/macros.h
 NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/functions.h
@@ -203,96 +246,7 @@ $(obj)/conftest/patches.h: $(NV_CONFTEST_SCRIPT)
 	@mkdir -p $(obj)/conftest
 	@$(NV_CONFTEST_CMD) patch_check > $@
 
 
-# Each of these headers is checked for presence with a test #include; a
-# corresponding #define will be generated in conftest/headers.h.
-NV_HEADER_PRESENCE_TESTS = \
-  asm/system.h \
-  drm/drmP.h \
-  drm/drm_auth.h \
-  drm/drm_gem.h \
-  drm/drm_crtc.h \
-  drm/drm_atomic.h \
-  drm/drm_atomic_helper.h \
-  drm/drm_encoder.h \
-  drm/drm_atomic_uapi.h \
-  drm/drm_drv.h \
-  drm/drm_framebuffer.h \
-  drm/drm_connector.h \
-  drm/drm_probe_helper.h \
-  drm/drm_blend.h \
-  drm/drm_fourcc.h \
-  drm/drm_prime.h \
-  drm/drm_plane.h \
-  drm/drm_vblank.h \
-  drm/drm_file.h \
-  drm/drm_ioctl.h \
-  drm/drm_device.h \
-  drm/drm_mode_config.h \
-  drm/drm_modeset_lock.h \
-  dt-bindings/interconnect/tegra_icc_id.h \
-  generated/autoconf.h \
-  generated/compile.h \
-  generated/utsrelease.h \
-  linux/efi.h \
-  linux/kconfig.h \
-  linux/platform/tegra/mc_utils.h \
-  linux/semaphore.h \
-  linux/printk.h \
-  linux/ratelimit.h \
-  linux/prio_tree.h \
-  linux/log2.h \
-  linux/of.h \
-  linux/bug.h \
-  linux/sched.h \
-  linux/sched/mm.h \
-  linux/sched/signal.h \
-  linux/sched/task.h \
-  linux/sched/task_stack.h \
-  xen/ioemu.h \
-  linux/fence.h \
-  linux/dma-resv.h \
-  soc/tegra/chip-id.h \
-  soc/tegra/fuse.h \
-  soc/tegra/tegra_bpmp.h \
-  video/nv_internal.h \
-  linux/platform/tegra/dce/dce-client-ipc.h \
-  linux/nvhost.h \
-  linux/nvhost_t194.h \
-  asm/book3s/64/hash-64k.h \
-  asm/set_memory.h \
-  asm/prom.h \
-  asm/powernv.h \
-  linux/atomic.h \
-  asm/barrier.h \
-  asm/opal-api.h \
-  sound/hdaudio.h \
-  asm/pgtable_types.h \
-  linux/stringhash.h \
-  linux/dma-map-ops.h \
-  rdma/peer_mem.h \
-  sound/hda_codec.h \
-  linux/dma-buf.h \
-  linux/time.h \
-  linux/platform_device.h \
-  linux/mutex.h \
-  linux/reset.h \
-  linux/of_platform.h \
-  linux/of_device.h \
-  linux/of_gpio.h \
-  linux/gpio.h \
-  linux/gpio/consumer.h \
-  linux/interconnect.h \
-  linux/pm_runtime.h \
-  linux/clk.h \
-  linux/clk-provider.h \
-  linux/ioasid.h \
-  linux/stdarg.h \
-  linux/iosys-map.h \
-  asm/coco.h \
-  linux/vfio_pci_core.h \
-  soc/tegra/bpmp-abi.h \
-  soc/tegra/bpmp.h
+include $(src)/header-presence-tests.mk
 
 # Filename to store the define for the header in $(1); this is only consumed by
 # the rule below that concatenates all of these together.
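For context on the conftest flags added above: conftest.sh works by compiling small, throwaway C translation units against the target kernel's headers, and each compile result becomes a `#define` in the generated conftest headers (as the diff's own comment says for the header-presence tests). The probe below is an illustrative sketch of that mechanism, not code taken from conftest.sh itself; the probed function and the generated define name merely mirror the `NV_IOREMAP_CACHE_PRESENT`-style convention seen elsewhere in this diff.

```c
/*
 * Illustrative conftest-style probe (a sketch, not from conftest.sh):
 * if this translation unit compiles against the target kernel headers,
 * the build would emit a define such as NV_IOREMAP_CACHE_PRESENT into
 * the generated conftest/functions.h.
 */
#include <linux/io.h>

void conftest_ioremap_cache(void)
{
    /* Referencing the symbol fails to compile on kernels that lack it. */
    ioremap_cache(0, 0);
}
```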
@@ -28,7 +28,7 @@ else
   else
     KERNEL_UNAME ?= $(shell uname -r)
     KERNEL_MODLIB := /lib/modules/$(KERNEL_UNAME)
-    KERNEL_SOURCES := $(shell test -d $(KERNEL_MODLIB)/source && echo $(KERNEL_MODLIB)/source || echo $(KERNEL_MODLIB)/build)
+    KERNEL_SOURCES := $(shell ((test -d $(KERNEL_MODLIB)/source && echo $(KERNEL_MODLIB)/source) || (test -d $(KERNEL_MODLIB)/build/source && echo $(KERNEL_MODLIB)/build/source)) || echo $(KERNEL_MODLIB)/build)
   endif
 
   KERNEL_OUTPUT := $(KERNEL_SOURCES)
@@ -42,12 +42,32 @@ else
   else
     KERNEL_UNAME ?= $(shell uname -r)
     KERNEL_MODLIB := /lib/modules/$(KERNEL_UNAME)
-    ifeq ($(KERNEL_SOURCES), $(KERNEL_MODLIB)/source)
+    # $(filter patter...,text) - Returns all whitespace-separated words in text that
+    # do match any of the pattern words, removing any words that do not match.
+    # Set the KERNEL_OUTPUT only if either $(KERNEL_MODLIB)/source or
+    # $(KERNEL_MODLIB)/build/source path matches the KERNEL_SOURCES.
+    ifneq ($(filter $(KERNEL_SOURCES),$(KERNEL_MODLIB)/source $(KERNEL_MODLIB)/build/source),)
       KERNEL_OUTPUT := $(KERNEL_MODLIB)/build
       KBUILD_PARAMS := KBUILD_OUTPUT=$(KERNEL_OUTPUT)
     endif
   endif
 
+  # If CC hasn't been set explicitly, check the value of CONFIG_CC_VERSION_TEXT.
+  # Look for the compiler specified there, and use it by default, if found.
+  ifeq ($(origin CC),default)
+    cc_version_text=$(firstword $(shell . $(KERNEL_OUTPUT)/.config; \
+      echo "$$CONFIG_CC_VERSION_TEXT"))
+
+    ifneq ($(cc_version_text),)
+      ifeq ($(shell command -v $(cc_version_text)),)
+        $(warning WARNING: Unable to locate the compiler $(cc_version_text) \
+          from CONFIG_CC_VERSION_TEXT in the kernel configuration.)
+      else
+        CC=$(cc_version_text)
+      endif
+    endif
+  endif
+
   CC ?= cc
   LD ?= ld
   OBJDUMP ?= objdump
@@ -57,12 +77,25 @@ else
       -e 's/armv[0-7]\w\+/arm/' \
       -e 's/aarch64/arm64/' \
       -e 's/ppc64le/powerpc/' \
+      -e 's/riscv64/riscv/' \
     )
   endif
 
+  KERNEL_ARCH = $(ARCH)
+
+  ifneq ($(filter $(ARCH),i386 x86_64),)
+    KERNEL_ARCH = x86
+  else
+    ifeq ($(filter $(ARCH),arm64 powerpc),)
+      $(error Unsupported architecture $(ARCH))
+    endif
+  endif
+
   NV_KERNEL_MODULES ?= $(wildcard nvidia nvidia-uvm nvidia-vgpu-vfio nvidia-modeset nvidia-drm nvidia-peermem)
   NV_KERNEL_MODULES := $(filter-out $(NV_EXCLUDE_KERNEL_MODULES), \
                                     $(NV_KERNEL_MODULES))
+  INSTALL_MOD_DIR ?= kernel/drivers/video
 
   NV_VERBOSE ?=
   SPECTRE_V2_RETPOLINE ?= 0
@@ -74,7 +107,7 @@ else
   KBUILD_PARAMS += NV_KERNEL_SOURCES=$(KERNEL_SOURCES)
   KBUILD_PARAMS += NV_KERNEL_OUTPUT=$(KERNEL_OUTPUT)
   KBUILD_PARAMS += NV_KERNEL_MODULES="$(NV_KERNEL_MODULES)"
-  KBUILD_PARAMS += INSTALL_MOD_DIR=kernel/drivers/video
+  KBUILD_PARAMS += INSTALL_MOD_DIR="$(INSTALL_MOD_DIR)"
   KBUILD_PARAMS += NV_SPECTRE_V2=$(SPECTRE_V2_RETPOLINE)
 
   .PHONY: modules module clean clean_conftest modules_install
@@ -99,8 +132,9 @@ else
   # module symbols on which the Linux kernel's module resolution is dependent
   # and hence must be used whenever present.
 
-  LD_SCRIPT ?= $(KERNEL_SOURCES)/scripts/module-common.lds \
-               $(KERNEL_SOURCES)/arch/$(ARCH)/kernel/module.lds \
+  LD_SCRIPT ?= $(KERNEL_SOURCES)/scripts/module-common.lds \
+               $(KERNEL_SOURCES)/arch/$(KERNEL_ARCH)/kernel/module.lds \
+               $(KERNEL_OUTPUT)/arch/$(KERNEL_ARCH)/module.lds \
               $(KERNEL_OUTPUT)/scripts/module.lds
   NV_MODULE_COMMON_SCRIPTS := $(foreach s, $(wildcard $(LD_SCRIPT)), -T $(s))
kernel-open/common/inc/nv-chardev-numbers.h (new file, 43 lines)

@@ -0,0 +1,43 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _NV_CHARDEV_NUMBERS_H_
+#define _NV_CHARDEV_NUMBERS_H_
+
+// NVIDIA's reserved major character device number (Linux).
+#define NV_MAJOR_DEVICE_NUMBER 195
+
+// Minor numbers 0 to 247 reserved for regular devices
+#define NV_MINOR_DEVICE_NUMBER_REGULAR_MAX 247
+
+// Minor numbers 248 to 253 currently unused
+
+// Minor number 254 reserved for the modeset device (provided by NVKMS)
+#define NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE 254
+
+// Minor number 255 reserved for the control device
+#define NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE 255
+
+#endif // _NV_CHARDEV_NUMBERS_H_
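These reserved numbers correspond to the device nodes the driver exposes (per-GPU nodes in the regular minor range, plus the modeset and control devices at minors 254 and 255, the latter conventionally /dev/nvidiactl). A minimal sketch of how a kernel driver would turn them into `dev_t` values with the standard `MKDEV` macro; the helper names below are hypothetical, not functions from this repository:

```c
#include <linux/kdev_t.h>   /* MKDEV */
#include <linux/types.h>    /* dev_t */

#include "nv-chardev-numbers.h"

/* Hypothetical helper: map a GPU index onto NVIDIA's reserved
 * major/minor space from nv-chardev-numbers.h. */
static dev_t nv_gpu_devt(unsigned int gpu_index)
{
    if (gpu_index > NV_MINOR_DEVICE_NUMBER_REGULAR_MAX)
        return MKDEV(0, 0);  /* outside the regular-device minor range */
    return MKDEV(NV_MAJOR_DEVICE_NUMBER, gpu_index);
}

/* Hypothetical helper: the control device sits at the top minor. */
static dev_t nv_control_devt(void)
{
    return MKDEV(NV_MAJOR_DEVICE_NUMBER,
                 NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE);
}
```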
kernel-open/common/inc/nv-firmware-registry.h (new file, 83 lines)

@@ -0,0 +1,83 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+//
+// This file holds GPU firmware related registry key definitions that are
+// shared between Windows and Unix
+//
+
+#ifndef NV_FIRMWARE_REGISTRY_H
+#define NV_FIRMWARE_REGISTRY_H
+
+//
+// Registry key that when enabled, will enable use of GPU firmware.
+//
+// Possible mode values:
+//  0 - Do not enable GPU firmware
+//  1 - Enable GPU firmware
+//  2 - (Default) Use the default enablement policy for GPU firmware
+//
+// Setting this to anything other than 2 will alter driver firmware-
+// enablement policies, possibly disabling GPU firmware where it would
+// have otherwise been enabled by default.
+//
+// Policy bits:
+//
+// POLICY_ALLOW_FALLBACK:
+//  As the normal behavior is to fail GPU initialization if this registry
+//  entry is set in such a way that results in an invalid configuration, if
+//  instead the user would like the driver to automatically try to fallback
+//  to initializing the failing GPU with firmware disabled, then this bit can
+//  be set (ex: 0x11 means try to enable GPU firmware but fall back if needed).
+//  Note that this can result in a mixed mode configuration (ex: GPU0 has
+//  firmware enabled, but GPU1 does not).
+//
+#define NV_REG_STR_ENABLE_GPU_FIRMWARE "EnableGpuFirmware"
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
+#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012
+
+//
+// Registry key that when enabled, will send GPU firmware logs
+// to the system log, when possible.
+//
+// Possible values:
+//  0 - Do not send GPU firmware logs to the system log
+//  1 - Enable sending of GPU firmware logs to the system log
+//  2 - (Default) Enable sending of GPU firmware logs to the system log for
+//      the debug kernel driver build only
+//
+#define NV_REG_STR_ENABLE_GPU_FIRMWARE_LOGS "EnableGpuFirmwareLogs"
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002
+
+#endif // NV_FIRMWARE_REGISTRY_H
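The mode and policy fields are packed into a single registry value, so the 0x11 example in the header's comment decodes as mode 1 (force-enable GPU firmware) plus the allow-fallback policy bit, and the default 0x12 decodes as mode 2 (platform default) plus fallback. A small sketch of that decoding, using only the masks defined in the header; the function name is illustrative:

```c
#include <stdio.h>

#include "nv-firmware-registry.h"

/* Illustrative decoder (hypothetical name): split the packed
 * EnableGpuFirmware registry value into its mode and policy fields. */
static void decode_enable_gpu_firmware(unsigned int value)
{
    unsigned int mode   = value & NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK;
    unsigned int policy = value & NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK;

    printf("mode=%u allow_fallback=%s\n", mode,
           (policy & NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK)
               ? "yes" : "no");
}

int main(void)
{
    /* 0x12, the default: mode 2 (default policy), fallback allowed. */
    decode_enable_gpu_firmware(NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE);

    /* 0x11, the comment's example: force-enable, fall back if needed. */
    decode_enable_gpu_firmware(0x11);
    return 0;
}
```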
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -81,12 +81,12 @@ static inline const char *nv_firmware_path(
 {
     switch (fw_chip_family)
     {
-        case NV_FIRMWARE_CHIP_FAMILY_AD10X:
-            return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_ad10x.bin");
-
         case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
+        case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
+        case NV_FIRMWARE_CHIP_FAMILY_GA10X:
+            return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_ga10x.bin");
+
         case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
-        case NV_FIRMWARE_CHIP_FAMILY_GA10X: // fall through
         case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
         case NV_FIRMWARE_CHIP_FAMILY_TU10X:
             return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_tu10x.bin");
@@ -100,12 +100,12 @@ static inline const char *nv_firmware_path(
 {
     switch (fw_chip_family)
     {
-        case NV_FIRMWARE_CHIP_FAMILY_AD10X:
-            return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_ad10x.bin");
-
         case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
+        case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
+        case NV_FIRMWARE_CHIP_FAMILY_GA10X:
+            return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_ga10x.bin");
+
         case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
-        case NV_FIRMWARE_CHIP_FAMILY_GA10X: // fall through
         case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
         case NV_FIRMWARE_CHIP_FAMILY_TU10X:
             return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_tu10x.bin");
@@ -125,7 +125,7 @@ static inline const char *nv_firmware_path(
 // which will then be invoked (at the top-level) for each
 // gsp_*.bin (but not gsp_log_*.bin)
 #if defined(NV_FIRMWARE_DECLARE_GSP_FILENAME)
-NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_ad10x.bin")
+NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_ga10x.bin")
 NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_tu10x.bin")
 #endif // defined(NV_FIRMWARE_DECLARE_GSP_FILENAME)
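After this change the chip-family-to-firmware mapping collapses to two GSP images, with Ada (AD10X) and Hopper (GH100) sharing gsp_ga10x.bin rather than AD10X carrying its own gsp_ad10x.bin. A condensed, illustrative restatement of the new grouping; the enum and helper below are stand-ins, not the driver's actual declarations:

```c
/* Illustrative stand-ins for the driver's chip-family enum and path macro. */
typedef enum {
    FW_FAMILY_TU10X, FW_FAMILY_TU11X, FW_FAMILY_GA100,
    FW_FAMILY_GA10X, FW_FAMILY_AD10X, FW_FAMILY_GH100,
} fw_family_t;

/* Post-change grouping: AD10X and GH100 now fall through to the
 * gsp_ga10x.bin image instead of a dedicated gsp_ad10x.bin. */
static const char *gsp_filename(fw_family_t family)
{
    switch (family) {
        case FW_FAMILY_GH100:  /* fall through */
        case FW_FAMILY_AD10X:  /* fall through */
        case FW_FAMILY_GA10X:
            return "gsp_ga10x.bin";
        default:               /* GA100, TU11X, TU10X */
            return "gsp_tu10x.bin";
    }
}
```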
@@ -37,12 +37,11 @@ typedef enum _HYPERVISOR_TYPE
     OS_HYPERVISOR_UNKNOWN
 } HYPERVISOR_TYPE;
 
-#define CMD_VGPU_VFIO_WAKE_WAIT_QUEUE 0
-#define CMD_VGPU_VFIO_INJECT_INTERRUPT 1
-#define CMD_VGPU_VFIO_REGISTER_MDEV 2
-#define CMD_VGPU_VFIO_PRESENT 3
+#define CMD_VFIO_WAKE_REMOVE_GPU 1
+#define CMD_VGPU_VFIO_PRESENT 2
+#define CMD_VFIO_PCI_CORE_PRESENT 3
 
-#define MAX_VF_COUNT_PER_GPU 64
+#define MAX_VF_COUNT_PER_GPU 64
 
 typedef enum _VGPU_TYPE_INFO
 {
@@ -53,16 +52,11 @@ typedef enum _VGPU_TYPE_INFO
 
 typedef struct
 {
     void *vgpuVfioRef;
-    void *waitQueue;
     void *nv;
     NvU32 *vgpuTypeIds;
     NvU32 numVgpuTypes;
-    NvU32 domain;
-    NvU8 bus;
-    NvU8 slot;
-    NvU8 function;
-    NvBool is_virtfn;
+    NvU32 domain;
+    NvU32 bus;
+    NvU32 device;
     NvU32 return_status;
 } vgpu_vfio_info;
 
 typedef struct
@@ -90,30 +84,6 @@ typedef enum VGPU_DEVICE_STATE_E
     NV_VGPU_DEV_IN_USE = 2
 } VGPU_DEVICE_STATE;
 
-typedef enum _VMBUS_CMD_TYPE
-{
-    VMBUS_CMD_TYPE_INVALID = 0,
-    VMBUS_CMD_TYPE_SETUP = 1,
-    VMBUS_CMD_TYPE_SENDPACKET = 2,
-    VMBUS_CMD_TYPE_CLEANUP = 3,
-} VMBUS_CMD_TYPE;
-
-typedef struct
-{
-    NvU32 request_id;
-    NvU32 page_count;
-    NvU64 *pPfns;
-    void *buffer;
-    NvU32 bufferlen;
-} vmbus_send_packet_cmd_params;
-
-
-typedef struct
-{
-    NvU32 override_sint;
-    NvU8 *nv_guid;
-} vmbus_setup_cmd_params;
 
 /*
  * Function prototypes
 */
@@ -25,14 +25,12 @@
 #ifndef NV_IOCTL_NUMA_H
 #define NV_IOCTL_NUMA_H
 
-#if defined(NV_LINUX)
-
 #include <nv-ioctl-numbers.h>
 
-#if defined(NV_KERNEL_INTERFACE_LAYER)
-
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
 #include <linux/types.h>
 
+#elif defined (NV_KERNEL_INTERFACE_LAYER) && defined(NV_BSD)
+#include <sys/stdint.h>
+
 #else
 
 #include <stdint.h>
@@ -62,6 +60,7 @@ typedef struct nv_ioctl_numa_info
     uint64_t memblock_size __aligned(8);
     uint64_t numa_mem_addr __aligned(8);
     uint64_t numa_mem_size __aligned(8);
+    uint8_t use_auto_online;
     nv_offline_addresses_t offline_addresses __aligned(8);
 } nv_ioctl_numa_info_t;
@@ -80,5 +79,3 @@ typedef struct nv_ioctl_set_numa_status
 #define NV_IOCTL_NUMA_STATUS_OFFLINE_FAILED 6
 
 #endif
-
-#endif
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,5 +39,6 @@
 #define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13)
 #define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14)
 #define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17)
+#define NV_ESC_WAIT_OPEN_COMPLETE (NV_IOCTL_BASE + 18)
 
 #endif
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -104,7 +104,7 @@ typedef struct nv_ioctl_rm_api_version
 
 #define NV_RM_API_VERSION_CMD_STRICT 0
 #define NV_RM_API_VERSION_CMD_RELAXED '1'
-#define NV_RM_API_VERSION_CMD_OVERRIDE '2'
+#define NV_RM_API_VERSION_CMD_QUERY '2'
 
 #define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0
 #define NV_RM_API_VERSION_REPLY_RECOGNIZED 1
@@ -142,4 +142,10 @@ typedef struct nv_ioctl_export_to_dma_buf_fd
     NvU32 status;
 } nv_ioctl_export_to_dma_buf_fd_t;
 
+typedef struct nv_ioctl_wait_open_complete
+{
+    int rc;
+    NvU32 adapterStatus;
+} nv_ioctl_wait_open_complete_t;
+
 #endif
||||
62
kernel-open/common/inc/nv-kthread-q-os.h
Normal file
62
kernel-open/common/inc/nv-kthread-q-os.h
Normal file
@@ -0,0 +1,62 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NV_KTHREAD_QUEUE_OS_H__
|
||||
#define __NV_KTHREAD_QUEUE_OS_H__
|
||||
|
||||
#include <linux/types.h> // atomic_t
|
||||
#include <linux/list.h> // list
|
||||
#include <linux/sched.h> // task_struct
|
||||
#include <linux/numa.h> // NUMA_NO_NODE
|
||||
#include <linux/semaphore.h>
|
||||
|
||||
#include "conftest.h"
|
||||
|
||||
struct nv_kthread_q
|
||||
{
|
||||
struct list_head q_list_head;
|
||||
spinlock_t q_lock;
|
||||
|
||||
// This is a counting semaphore. It gets incremented and decremented
|
||||
// exactly once for each item that is added to the queue.
|
||||
struct semaphore q_sem;
|
||||
atomic_t main_loop_should_exit;
|
||||
|
||||
struct task_struct *q_kthread;
|
||||
};
|
||||
|
||||
struct nv_kthread_q_item
|
||||
{
|
||||
struct list_head q_list_node;
|
||||
nv_q_func_t function_to_run;
|
||||
void *function_args;
|
||||
};
|
||||
|
||||
|
||||
#ifndef NUMA_NO_NODE
|
||||
#define NUMA_NO_NODE (-1)
|
||||
#endif
|
||||
|
||||
#define NV_KTHREAD_NO_NODE NUMA_NO_NODE
|
||||
|
||||
#endif
|
||||
@@ -24,18 +24,14 @@
 #ifndef __NV_KTHREAD_QUEUE_H__
 #define __NV_KTHREAD_QUEUE_H__
 
-#include <linux/types.h>            // atomic_t
-#include <linux/list.h>             // list
-#include <linux/sched.h>            // task_struct
-#include <linux/numa.h>             // NUMA_NO_NODE
+struct nv_kthread_q;
+struct nv_kthread_q_item;
+typedef struct nv_kthread_q nv_kthread_q_t;
+typedef struct nv_kthread_q_item nv_kthread_q_item_t;
 
-#include "conftest.h"
+typedef void (*nv_q_func_t)(void *args);
 
-#if defined(NV_LINUX_SEMAPHORE_H_PRESENT)
-#include <linux/semaphore.h>
-#else
-#include <asm/semaphore.h>
-#endif
+#include "nv-kthread-q-os.h"
 
 ////////////////////////////////////////////////////////////////////////////////
 // nv_kthread_q:
@@ -90,38 +86,6 @@
 //
 ////////////////////////////////////////////////////////////////////////////////
 
-typedef struct nv_kthread_q nv_kthread_q_t;
-typedef struct nv_kthread_q_item nv_kthread_q_item_t;
-
-typedef void (*nv_q_func_t)(void *args);
-
-struct nv_kthread_q
-{
-    struct list_head q_list_head;
-    spinlock_t q_lock;
-
-    // This is a counting semaphore. It gets incremented and decremented
-    // exactly once for each item that is added to the queue.
-    struct semaphore q_sem;
-    atomic_t main_loop_should_exit;
-
-    struct task_struct *q_kthread;
-};
-
-struct nv_kthread_q_item
-{
-    struct list_head q_list_node;
-    nv_q_func_t function_to_run;
-    void *function_args;
-};
-
-
-#ifndef NUMA_NO_NODE
-#define NUMA_NO_NODE (-1)
-#endif
-
-#define NV_KTHREAD_NO_NODE NUMA_NO_NODE
-
 //
 // The queue must not be used before calling this routine.
 //
@@ -160,10 +124,7 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q,
 // This routine is the same as nv_kthread_q_init_on_node() with the exception
 // that the queue stack will be allocated on the NUMA node of the caller.
 //
-static inline int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
-{
-    return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
-}
+int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname);
 
 //
 // The caller is responsible for stopping all queues, by calling this routine
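This split leaves nv-kthread-q.h as the portable API surface (forward declarations and typedefs) and moves the struct layouts and Linux includes into nv-kthread-q-os.h. As a usage sketch of that API: `nv_kthread_q_init()` and `nv_kthread_q_init_on_node()` appear in the diff above, but the item-init, scheduling, and stop helpers below are assumptions based on the header's naming convention, not signatures quoted from this diff.

```c
#include "nv-kthread-q.h"

static void do_work(void *args)
{
    /* Runs on the queue's dedicated kthread. */
}

static int example(void)
{
    nv_kthread_q_t q;                /* layout comes from nv-kthread-q-os.h */
    nv_kthread_q_item_t item;
    int ret;

    /* Allocate the queue's kthread stack on the caller's NUMA node; an
     * explicit node can be requested with nv_kthread_q_init_on_node(). */
    ret = nv_kthread_q_init(&q, "example_q");
    if (ret != 0)
        return ret;

    /* Assumed helpers, following the header's naming convention: */
    nv_kthread_q_item_init(&item, do_work, NULL);
    nv_kthread_q_schedule_q_item(&q, &item);

    /* Assumed name for the "stop" routine the header's comment says the
     * caller must invoke before teardown. */
    nv_kthread_q_stop(&q);
    return 0;
}
```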
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2001-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -35,6 +35,7 @@
|
||||
#include "os-interface.h"
|
||||
#include "nv-timer.h"
|
||||
#include "nv-time.h"
|
||||
#include "nv-chardev-numbers.h"
|
||||
|
||||
#define NV_KERNEL_NAME "Linux"
|
||||
|
||||
@@ -211,6 +212,7 @@
|
||||
#include <linux/highmem.h>
|
||||
|
||||
#include <linux/nodemask.h>
|
||||
#include <linux/memory.h>
|
||||
|
||||
#include <linux/workqueue.h> /* workqueue */
|
||||
#include "nv-kthread-q.h" /* kthread based queue */
|
||||
@@ -247,7 +249,7 @@ NV_STATUS nvos_forward_error_to_cray(struct pci_dev *, NvU32,
|
||||
#undef NV_SET_PAGES_UC_PRESENT
|
||||
#endif
|
||||
|
||||
#if !defined(NVCPU_AARCH64) && !defined(NVCPU_PPC64LE)
|
||||
#if !defined(NVCPU_AARCH64) && !defined(NVCPU_PPC64LE) && !defined(NVCPU_RISCV64)
|
||||
#if !defined(NV_SET_MEMORY_UC_PRESENT) && !defined(NV_SET_PAGES_UC_PRESENT)
|
||||
#error "This driver requires the ability to change memory types!"
|
||||
#endif
|
||||
@@ -405,32 +407,6 @@ extern int nv_pat_mode;
|
||||
#define NV_GFP_DMA32 (NV_GFP_KERNEL)
|
||||
#endif
|
||||
|
||||
extern NvBool nvos_is_chipset_io_coherent(void);
|
||||
|
||||
#if defined(NVCPU_X86_64)
|
||||
#define CACHE_FLUSH() asm volatile("wbinvd":::"memory")
|
||||
#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
|
||||
#elif defined(NVCPU_AARCH64)
|
||||
static inline void nv_flush_cache_cpu(void *info)
|
||||
{
|
||||
if (!nvos_is_chipset_io_coherent())
|
||||
{
|
||||
#if defined(NV_FLUSH_CACHE_ALL_PRESENT)
|
||||
flush_cache_all();
|
||||
#else
|
||||
WARN_ONCE(0, "NVRM: kernel does not support flush_cache_all()\n");
|
||||
#endif
|
||||
}
|
||||
}
|
||||
#define CACHE_FLUSH() nv_flush_cache_cpu(NULL)
|
||||
#define CACHE_FLUSH_ALL() on_each_cpu(nv_flush_cache_cpu, NULL, 1)
|
||||
#define WRITE_COMBINE_FLUSH() mb()
|
||||
#elif defined(NVCPU_PPC64LE)
|
||||
#define CACHE_FLUSH() asm volatile("sync; \n" \
|
||||
"isync; \n" ::: "memory")
|
||||
#define WRITE_COMBINE_FLUSH() CACHE_FLUSH()
|
||||
#endif
|
||||
|
||||
typedef enum
{
    NV_MEMORY_TYPE_SYSTEM,      /* Memory mapped for ROM, SBIOS and physical RAM. */
@@ -439,7 +415,7 @@ typedef enum
    NV_MEMORY_TYPE_DEVICE_MMIO, /* All kinds of MMIO referred by NVRM e.g. BARs and MCFG of device */
} nv_memory_type_t;

#if defined(NVCPU_AARCH64) || defined(NVCPU_PPC64LE)
#if defined(NVCPU_AARCH64) || defined(NVCPU_PPC64LE) || defined(NVCPU_RISCV64)
#define NV_ALLOW_WRITE_COMBINING(mt)    1
#elif defined(NVCPU_X86_64)
#if defined(NV_ENABLE_PAT_SUPPORT)
@@ -498,7 +474,9 @@ static inline void *nv_vmalloc(unsigned long size)
    void *ptr = __vmalloc(size, GFP_KERNEL);
#endif
    if (ptr)
    {
        NV_MEMDBG_ADD(ptr, size);
    }
    return ptr;
}

@@ -510,9 +488,15 @@ static inline void nv_vfree(void *ptr, NvU64 size)

static inline void *nv_ioremap(NvU64 phys, NvU64 size)
{
#if IS_ENABLED(CONFIG_INTEL_TDX_GUEST) && defined(NV_IOREMAP_DRIVER_HARDENED_PRESENT)
    void *ptr = ioremap_driver_hardened(phys, size);
#else
    void *ptr = ioremap(phys, size);
#endif
    if (ptr)
    {
        NV_MEMDBG_ADD(ptr, size);
    }
    return ptr;
}

@@ -523,11 +507,11 @@ static inline void *nv_ioremap_nocache(NvU64 phys, NvU64 size)

static inline void *nv_ioremap_cache(NvU64 phys, NvU64 size)
{
#if defined(NV_IOREMAP_CACHE_PRESENT)
    void *ptr = ioremap_cache(phys, size);
    if (ptr)
        NV_MEMDBG_ADD(ptr, size);
    return ptr;
    void *ptr = NULL;
#if IS_ENABLED(CONFIG_INTEL_TDX_GUEST) && defined(NV_IOREMAP_CACHE_SHARED_PRESENT)
    ptr = ioremap_cache_shared(phys, size);
#elif defined(NV_IOREMAP_CACHE_PRESENT)
    ptr = ioremap_cache(phys, size);
#elif defined(NVCPU_PPC64LE)
    //
    // ioremap_cache() has been only implemented correctly for ppc64le with
@@ -542,25 +526,34 @@ static inline void *nv_ioremap_cache(NvU64 phys, NvU64 size)
    // (commit 40f1ce7fb7e8, kernel 3.0+) and that covers all kernels we
    // support on power.
    //
    void *ptr = ioremap_prot(phys, size, pgprot_val(PAGE_KERNEL));
    if (ptr)
        NV_MEMDBG_ADD(ptr, size);
    return ptr;
    ptr = ioremap_prot(phys, size, pgprot_val(PAGE_KERNEL));
#else
    return nv_ioremap(phys, size);
#endif

    if (ptr)
    {
        NV_MEMDBG_ADD(ptr, size);
    }
    return ptr;
}

static inline void *nv_ioremap_wc(NvU64 phys, NvU64 size)
{
#if defined(NV_IOREMAP_WC_PRESENT)
    void *ptr = ioremap_wc(phys, size);
    if (ptr)
        NV_MEMDBG_ADD(ptr, size);
    return ptr;
    void *ptr = NULL;
#if IS_ENABLED(CONFIG_INTEL_TDX_GUEST) && defined(NV_IOREMAP_DRIVER_HARDENED_WC_PRESENT)
    ptr = ioremap_driver_hardened_wc(phys, size);
#elif defined(NV_IOREMAP_WC_PRESENT)
    ptr = ioremap_wc(phys, size);
#else
    return nv_ioremap_nocache(phys, size);
#endif

    if (ptr)
    {
        NV_MEMDBG_ADD(ptr, size);
    }
    return ptr;
}

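/*
 * Editor's illustrative sketch (not part of the change): pairing
 * nv_ioremap_wc() with nv_iounmap(). The BAR base, length, and function name
 * are hypothetical; memset_io() stands in for real device accesses.
 */
static int nv_example_touch_fb_wc(NvU64 bar1_base, NvU64 len)
{
    void *ptr = nv_ioremap_wc(bar1_base, len);   /* hardened WC in TDX guests, else ioremap_wc() */
    if (ptr == NULL)
        return -ENOMEM;
    memset_io(ptr, 0, len);
    WRITE_COMBINE_FLUSH();                       /* flush WC buffers before relying on the writes */
    nv_iounmap(ptr, len);                        /* balances the NV_MEMDBG_ADD accounting */
    return 0;
}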
static inline void nv_iounmap(void *ptr, NvU64 size)
@@ -633,6 +626,26 @@ static NvBool nv_numa_node_has_memory(int node_id)
        free_pages(ptr, order);                   \
    }

static inline pgprot_t nv_sme_clr(pgprot_t prot)
{
#if defined(__sme_clr)
    return __pgprot(__sme_clr(pgprot_val(prot)));
#else
    return prot;
#endif // __sme_clr
}

static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot, NvU32 extra)
{
    pgprot_t prot = __pgprot(pgprot_val(vm_prot) | extra);

#if defined(pgprot_decrypted)
    return pgprot_decrypted(prot);
#else
    return nv_sme_clr(prot);
#endif // pgprot_decrypted
}

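/*
 * Editor's illustrative sketch (not part of the change): preparing protection
 * bits for a user mapping of device memory. The vma handling is hypothetical.
 */
static void nv_example_prepare_user_mapping(struct vm_area_struct *vma)
{
    /* Uncached access, with any memory-encryption bit stripped so the CPU
     * sees device memory rather than ciphertext. */
    vma->vm_page_prot = nv_adjust_pgprot(pgprot_noncached(vma->vm_page_prot), 0);
}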
#if defined(PAGE_KERNEL_NOENC)
#if defined(__pgprot_mask)
#define NV_PAGE_KERNEL_NOCACHE_NOENC __pgprot_mask(__PAGE_KERNEL_NOCACHE)
@@ -654,7 +667,8 @@ static inline NvUPtr nv_vmap(struct page **pages, NvU32 page_count,
#if defined(PAGE_KERNEL_NOENC)
    if (unencrypted)
    {
        prot = cached ? PAGE_KERNEL_NOENC : NV_PAGE_KERNEL_NOCACHE_NOENC;
        prot = cached ? nv_adjust_pgprot(PAGE_KERNEL_NOENC, 0) :
                        nv_adjust_pgprot(NV_PAGE_KERNEL_NOCACHE_NOENC, 0);
    }
    else
#endif
@@ -667,7 +681,9 @@ static inline NvUPtr nv_vmap(struct page **pages, NvU32 page_count,
    /* All memory cached in PPC64LE; can't honor 'cached' input. */
    ptr = vmap(pages, page_count, VM_MAP, prot);
    if (ptr)
    {
        NV_MEMDBG_ADD(ptr, page_count * PAGE_SIZE);
    }
    return (NvUPtr)ptr;
}

@@ -720,7 +736,6 @@ static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa)
#define NV_VMA_FILE(vma)          ((vma)->vm_file)

#define NV_DEVICE_MINOR_NUMBER(x) minor((x)->i_rdev)
#define NV_CONTROL_DEVICE_MINOR   255

#define NV_PCI_DISABLE_DEVICE(pci_dev)            \
    {                                             \
@@ -939,26 +954,6 @@ static inline int nv_remap_page_range(struct vm_area_struct *vma,
    return ret;
}

static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot, NvU32 extra)
{
    pgprot_t prot = __pgprot(pgprot_val(vm_prot) | extra);
#if defined(CONFIG_AMD_MEM_ENCRYPT) && defined(NV_PGPROT_DECRYPTED_PRESENT)
    /*
     * When AMD memory encryption is enabled, device memory mappings with the
     * C-bit set read as 0xFF, so ensure the bit is cleared for user mappings.
     *
     * If cc_mkdec() is present, then pgprot_decrypted() can't be used.
     */
#if defined(NV_CC_MKDEC_PRESENT)
    prot = __pgprot(__sme_clr(pgprot_val(vm_prot)));
#else
    prot = pgprot_decrypted(prot);
#endif
#endif

    return prot;
}

static inline int nv_io_remap_page_range(struct vm_area_struct *vma,
                                         NvU64 phys_addr, NvU64 size, NvU32 extra_prot)
{
@@ -1182,7 +1177,7 @@ typedef struct nv_alloc_s {
    NvBool zeroed      : 1;
    NvBool aliased     : 1;
    NvBool user        : 1;
    NvBool node0       : 1;
    NvBool node        : 1;
    NvBool peer_io     : 1;
    NvBool physical    : 1;
    NvBool unencrypted : 1;
@@ -1196,6 +1191,7 @@ typedef struct nv_alloc_s {
    unsigned int     pid;
    struct page    **user_pages;
    NvU64            guest_id;   /* id of guest VM */
    NvS32            node_id;    /* Node id for memory allocation when node is set in flags */
    void            *import_priv;
    struct sg_table *import_sgt;
} nv_alloc_t;
@@ -1308,7 +1304,7 @@ nv_dma_maps_swiotlb(struct device *dev)
     * SEV memory encryption") forces SWIOTLB to be enabled when AMD SEV
     * is active in all cases.
     */
    if (os_sev_enabled)
    if (os_cc_enabled)
        swiotlb_in_use = NV_TRUE;
#endif

@@ -1362,7 +1358,19 @@ typedef struct nv_dma_map_s {
         i < dm->mapping.discontig.submap_count; \
         i++, sm = &dm->mapping.discontig.submaps[i])

/*
 * On 4K ARM kernels, use max submap size a multiple of 64K to keep nv-p2p happy.
 * Despite 4K OS pages, we still use 64K P2P pages due to dependent modules still using 64K.
 * Instead of using (4G-4K), use max submap size as (4G-64K) since the mapped IOVA range
 * must be aligned at 64K boundary.
 */
#if defined(CONFIG_ARM64_4K_PAGES)
#define NV_DMA_U32_MAX_4K_PAGES   ((NvU32)((NV_U32_MAX >> PAGE_SHIFT) + 1))
#define NV_DMA_SUBMAP_MAX_PAGES   ((NvU32)(NV_DMA_U32_MAX_4K_PAGES - 16))
#else
#define NV_DMA_SUBMAP_MAX_PAGES   ((NvU32)(NV_U32_MAX >> PAGE_SHIFT))
#endif

#define NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(s) (s * NV_DMA_SUBMAP_MAX_PAGES)

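/*
 * Editor's note (not part of the change): under CONFIG_ARM64_4K_PAGES the cap
 * works out to 4G - 64K: ((NV_U32_MAX >> 12) + 1) = 0x100000 pages, minus 16
 * pages (64K), keeps each submap's IOVA span 64K-aligned. A compile-time check:
 */
#if defined(CONFIG_ARM64_4K_PAGES)
_Static_assert((NV_DMA_SUBMAP_MAX_PAGES % 16) == 0,
               "submap size must remain a multiple of 64K (16 x 4K pages)");
#endif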
/*
@@ -1436,6 +1444,40 @@ struct nv_dma_device {
    NvBool nvlink;
};

/* Properties of the coherent link */
typedef struct coherent_link_info_s {
    /* Physical address of the GPU memory in the SOC AMAP. In a bare-metal OS
     * environment it is a System Physical Address (SPA); in a virtualized OS
     * environment it is an Intermediate Physical Address (IPA). */
    NvU64 gpu_mem_pa;

    /* Physical address of the reserved portion of the GPU memory, applicable
     * only on the Grace Hopper self-hosted passthrough virtualization platform. */
    NvU64 rsvd_mem_pa;

    /* Bitmap of NUMA node ids, corresponding to the reserved PXMs,
     * available for adding GPU memory to the kernel as system RAM */
    DECLARE_BITMAP(free_node_bitmap, MAX_NUMNODES);
} coherent_link_info_t;

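/*
 * Editor's illustrative sketch (not part of the change): walking the
 * free-node bitmap with the kernel's standard iterator.
 */
static void nv_example_list_free_nodes(const coherent_link_info_t *info)
{
    int nid;

    /* Visit every NUMA node whose reserved PXM is still available. */
    for_each_set_bit(nid, info->free_node_bitmap, MAX_NUMNODES)
        printk(KERN_INFO "NVRM: node %d available for GPU memory onlining\n", nid);
}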
#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED)
/*
 * acpi data storage structure
 *
 * This structure retains the pointer to the device,
 * and any other baggage we want to carry along
 *
 */
typedef struct
{
    nvidia_stack_t *sp;
    struct acpi_device *device;
    struct acpi_handle *handle;
    void *notifier_data;
    int notify_handler_installed;
} nv_acpi_t;
#endif

/* linux-specific version of old nv_state_t */
/* this is a general os-specific state structure. the first element *must* be
   the general state structure, for the generic unix-based code */
@@ -1451,6 +1493,13 @@ typedef struct nv_linux_state_s {
    /* IBM-NPU info associated with this GPU */
    nv_ibmnpu_info_t *npu;

    /* coherent link information */
    coherent_link_info_t coherent_link_info;

    /* Dedicated queue to be used for removing FB memory which is onlined
     * to the kernel as a NUMA node. Refer to bug 3879845. */
    nv_kthread_q_t remove_numa_memory_q;

    /* NUMA node information for the platforms where GPU memory is presented
     * as a NUMA node to the kernel */
    struct {
@@ -1461,6 +1510,7 @@ typedef struct nv_linux_state_s {
        /* NUMA online/offline status for platforms that support GPU memory as
         * a NUMA node */
        atomic_t status;
        NvBool use_auto_online;
    } numa_info;

    nvidia_stack_t *sp[NV_DEV_STACK_COUNT];
@@ -1530,8 +1580,13 @@ typedef struct nv_linux_state_s {
    /* Per-device notifier block for ACPI events */
    struct notifier_block acpi_nb;

#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED)
    nv_acpi_t* nv_acpi_object;
#endif

    /* Lock serializing ISRs for different SOC vectors */
    nv_spinlock_t soc_isr_lock;
    void *soc_bh_mutex;

    struct nv_timer snapshot_timer;
    nv_spinlock_t snapshot_timer_lock;
@@ -1547,6 +1602,30 @@ typedef struct nv_linux_state_s {

    struct nv_dma_device dma_dev;
    struct nv_dma_device niso_dma_dev;

    /*
     * Background kthread for handling deferred open operations
     * (e.g. from O_NONBLOCK).
     *
     * Adding to open_q and reading/writing is_accepting_opens
     * are protected by nvl->open_q_lock (not nvl->ldata_lock).
     * This allows new deferred open operations to be enqueued without
     * blocking behind previous ones (which hold nvl->ldata_lock).
     *
     * Adding to open_q is only safe if is_accepting_opens is true.
     * This prevents open operations from racing with device removal.
     *
     * Stopping open_q is only safe after setting is_accepting_opens to false.
     * This ensures that the open_q (and the larger nvl structure) will
     * outlive any of the open operations enqueued.
     */
    nv_kthread_q_t open_q;
    NvBool is_accepting_opens;
    struct semaphore open_q_lock;
#if defined(NV_VGPU_KVM_BUILD)
    wait_queue_head_t wait;
    NvS32 return_status;
#endif
} nv_linux_state_t;

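/*
 * Editor's illustrative sketch (not part of the change): enqueueing a
 * deferred open under the rules documented above. Assumes the nv-kthread-q
 * helper nv_kthread_q_schedule_q_item(); error handling is elided.
 */
static int nv_example_defer_open(nv_linux_state_t *nvl, nv_linux_file_private_t *nvlfp)
{
    int ret = -ENODEV;

    down(&nvl->open_q_lock);
    if (nvl->is_accepting_opens)
    {
        /* Safe: the flag guarantees open_q outlives this work item. */
        nv_kthread_q_schedule_q_item(&nvl->open_q, &nvlfp->open_q_item);
        ret = 0;
    }
    up(&nvl->open_q_lock);
    return ret;
}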
extern nv_linux_state_t *nv_linux_devices;
@@ -1577,24 +1656,6 @@ extern struct rw_semaphore nv_system_pm_lock;

extern NvBool nv_ats_supported;

#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED)
/*
 * acpi data storage structure
 *
 * This structure retains the pointer to the device,
 * and any other baggage we want to carry along
 *
 */
typedef struct
{
    nvidia_stack_t *sp;
    struct acpi_device *device;
    struct acpi_handle *handle;
    int notify_handler_installed;
} nv_acpi_t;

#endif

/*
 * file-private data
 * hide a pointer to our data structures in a file-private ptr
@@ -1608,22 +1669,13 @@ typedef struct nvidia_event
    nv_event_t event;
} nvidia_event_t;

typedef enum
{
    NV_FOPS_STACK_INDEX_MMAP,
    NV_FOPS_STACK_INDEX_IOCTL,
    NV_FOPS_STACK_INDEX_COUNT
} nvidia_entry_point_index_t;

typedef struct
{
    nv_file_private_t nvfp;

    nvidia_stack_t *sp;
    nvidia_stack_t *fops_sp[NV_FOPS_STACK_INDEX_COUNT];
    struct semaphore fops_sp_lock[NV_FOPS_STACK_INDEX_COUNT];
    nv_alloc_t *free_list;
    void *nvptr;
    nv_linux_state_t *nvptr;
    nvidia_event_t *event_data_head, *event_data_tail;
    NvBool dataless_event_pending;
    nv_spinlock_t fp_lock;
@@ -1634,6 +1686,12 @@ typedef struct
    nv_alloc_mapping_context_t mmap_context;
    struct address_space mapping;

    nv_kthread_q_item_t open_q_item;
    struct completion open_complete;
    nv_linux_state_t *deferred_open_nvl;
    int open_rc;
    NV_STATUS adapter_status;

    struct list_head entry;
} nv_linux_file_private_t;

@@ -1642,6 +1700,21 @@ static inline nv_linux_file_private_t *nv_get_nvlfp_from_nvfp(nv_file_private_t
    return container_of(nvfp, nv_linux_file_private_t, nvfp);
}

static inline int nv_wait_open_complete_interruptible(nv_linux_file_private_t *nvlfp)
{
    return wait_for_completion_interruptible(&nvlfp->open_complete);
}

static inline void nv_wait_open_complete(nv_linux_file_private_t *nvlfp)
{
    wait_for_completion(&nvlfp->open_complete);
}

static inline NvBool nv_is_open_complete(nv_linux_file_private_t *nvlfp)
{
    return completion_done(&nvlfp->open_complete);
}

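/*
 * Editor's illustrative sketch (not part of the change): a file-operation
 * path waiting for a deferred open to finish before touching device state.
 */
static int nv_example_wait_for_deferred_open(nv_linux_file_private_t *nvlfp)
{
    if (!nv_is_open_complete(nvlfp))
    {
        /* Interruptible, so a pending signal can abort the wait. */
        if (nv_wait_open_complete_interruptible(nvlfp) != 0)
            return -ERESTARTSYS;
    }
    return nvlfp->open_rc;
}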
#define NV_SET_FILE_PRIVATE(filep,data)  ((filep)->private_data = (data))
#define NV_GET_LINUX_FILE_PRIVATE(filep) ((nv_linux_file_private_t *)(filep)->private_data)

@@ -1651,28 +1724,6 @@ static inline nv_linux_file_private_t *nv_get_nvlfp_from_nvfp(nv_file_private_t

#define NV_STATE_PTR(nvl)   &(((nv_linux_state_t *)(nvl))->nv_state)

static inline nvidia_stack_t *nv_nvlfp_get_sp(nv_linux_file_private_t *nvlfp, nvidia_entry_point_index_t which)
{
#if defined(NVCPU_X86_64)
    if (rm_is_altstack_in_use())
    {
        down(&nvlfp->fops_sp_lock[which]);
        return nvlfp->fops_sp[which];
    }
#endif
    return NULL;
}

static inline void nv_nvlfp_put_sp(nv_linux_file_private_t *nvlfp, nvidia_entry_point_index_t which)
{
#if defined(NVCPU_X86_64)
    if (rm_is_altstack_in_use())
    {
        up(&nvlfp->fops_sp_lock[which]);
    }
#endif
}

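/*
 * Editor's illustrative sketch (not part of the change): bracketing an ioctl
 * entry point with the altstack helpers above; the body is hypothetical.
 */
static long nv_example_ioctl_entry(nv_linux_file_private_t *nvlfp)
{
    nvidia_stack_t *sp = nv_nvlfp_get_sp(nvlfp, NV_FOPS_STACK_INDEX_IOCTL);

    /* ... dispatch on sp (NULL when no altstack is in use) ... */

    nv_nvlfp_put_sp(nvlfp, NV_FOPS_STACK_INDEX_IOCTL);
    return 0;
}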
#define NV_ATOMIC_READ(data)     atomic_read(&(data))
#define NV_ATOMIC_SET(data,val)  atomic_set(&(data), (val))
#define NV_ATOMIC_INC(data)      atomic_inc(&(data))
@@ -1744,12 +1795,19 @@ static inline NV_STATUS nv_check_gpu_state(nv_state_t *nv)

extern NvU32 NVreg_EnableUserNUMAManagement;
extern NvU32 NVreg_RegisterPCIDriver;
extern NvU32 NVreg_EnableResizableBar;
extern NvU32 NVreg_EnableNonblockingOpen;

extern NvU32 num_probed_nv_devices;
extern NvU32 num_nv_devices;

#define NV_FILE_INODE(file) (file)->f_inode

static inline int nv_is_control_device(struct inode *inode)
{
    return (minor((inode)->i_rdev) == NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE);
}

#if defined(NV_DOM0_KERNEL_PRESENT) || defined(NV_VGPU_KVM_BUILD)
#define NV_VGX_HYPER
#if defined(NV_XEN_IOEMU_INJECT_MSI)
@@ -1938,6 +1996,11 @@ static inline int nv_set_numa_status(nv_linux_state_t *nvl, int status)
    return 0;
}

static inline NvBool nv_platform_use_auto_online(nv_linux_state_t *nvl)
{
    return nvl->numa_info.use_auto_online;
}

typedef enum
{
    NV_NUMA_STATUS_DISABLED = 0,
@@ -1998,4 +2061,7 @@ typedef enum
#include <linux/clk-provider.h>
#endif

#define NV_EXPORT_SYMBOL(symbol)       EXPORT_SYMBOL_GPL(symbol)
#define NV_CHECK_EXPORT_SYMBOL(symbol) NV_IS_EXPORT_SYMBOL_PRESENT_##symbol

#endif /* _NV_LINUX_H_ */

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,19 +29,15 @@
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/sched.h>     /* signal_pending, cond_resched */
#include <linux/semaphore.h>

#if defined(NV_LINUX_SCHED_SIGNAL_H_PRESENT)
#include <linux/sched/signal.h>  /* signal_pending for kernels >= 4.11 */
#endif

#if defined(NV_LINUX_SEMAPHORE_H_PRESENT)
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
#endif

#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)
typedef raw_spinlock_t nv_spinlock_t;
#define NV_DEFINE_SPINLOCK(lock)  DEFINE_RAW_SPINLOCK(lock)
#define NV_SPIN_LOCK_INIT(lock)   raw_spin_lock_init(lock)
#define NV_SPIN_LOCK_IRQ(lock)    raw_spin_lock_irq(lock)
#define NV_SPIN_UNLOCK_IRQ(lock)  raw_spin_unlock_irq(lock)
@@ -52,6 +48,7 @@ typedef raw_spinlock_t nv_spinlock_t;
#define NV_SPIN_UNLOCK_WAIT(lock) raw_spin_unlock_wait(lock)
#else
typedef spinlock_t nv_spinlock_t;
#define NV_DEFINE_SPINLOCK(lock)  DEFINE_SPINLOCK(lock)
#define NV_SPIN_LOCK_INIT(lock)   spin_lock_init(lock)
#define NV_SPIN_LOCK_IRQ(lock)    spin_lock_irq(lock)
#define NV_SPIN_UNLOCK_IRQ(lock)  spin_unlock_irq(lock)
@@ -62,20 +59,7 @@ typedef spinlock_t nv_spinlock_t;
#define NV_SPIN_UNLOCK_WAIT(lock) spin_unlock_wait(lock)
#endif

#if defined(NV_CONFIG_PREEMPT_RT)
#define NV_INIT_SEMA(sema, val) sema_init(sema,val)
#else
#if !defined(__SEMAPHORE_INITIALIZER) && defined(__COMPAT_SEMAPHORE_INITIALIZER)
#define __SEMAPHORE_INITIALIZER __COMPAT_SEMAPHORE_INITIALIZER
#endif
#define NV_INIT_SEMA(sema, val)                    \
    {                                              \
        struct semaphore __sema =                  \
            __SEMAPHORE_INITIALIZER(*(sema), val); \
        *(sema) = __sema;                          \
    }
#endif
#define NV_INIT_MUTEX(mutex) NV_INIT_SEMA(mutex, 1)
#define NV_INIT_MUTEX(mutex) sema_init(mutex, 1)

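/*
 * Editor's illustrative sketch (not part of the change): after this cleanup,
 * a binary mutex is initialized with a plain sema_init() underneath.
 */
static void nv_example_init_lock(struct semaphore *lock)
{
    NV_INIT_MUTEX(lock);   /* expands to sema_init(lock, 1) */
    down(lock);            /* acquire */
    up(lock);              /* release */
}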
static inline int nv_down_read_interruptible(struct rw_semaphore *lock)
{

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,12 +36,27 @@ typedef int vm_fault_t;
 * pin_user_pages() was added by commit eddb1c228f7951d399240
 * ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6-rc1 (2020-01-30)
 *
 * Removed vmas parameter from pin_user_pages() by commit 40896a02751
 * ("mm/gup: remove vmas parameter from pin_user_pages()")
 * in linux-next, expected in v6.5-rc1 (2023-05-17)
 *
 */

#include <linux/mm.h>
#include <linux/sched.h>
#if defined(NV_PIN_USER_PAGES_PRESENT)
#define NV_PIN_USER_PAGES pin_user_pages

/*
 * FreeBSD's pin_user_pages conftest breaks since pin_user_pages is an inline
 * function. Because it simply maps to get_user_pages, we can just replace
 * NV_PIN_USER_PAGES with NV_GET_USER_PAGES on FreeBSD.
 */
#if defined(NV_PIN_USER_PAGES_PRESENT) && !defined(NV_BSD)
#if defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS)
#define NV_PIN_USER_PAGES(start, nr_pages, gup_flags, pages) \
    pin_user_pages(start, nr_pages, gup_flags, pages, NULL)
#else
#define NV_PIN_USER_PAGES pin_user_pages
#endif // NV_PIN_USER_PAGES_HAS_ARGS_VMAS
#define NV_UNPIN_USER_PAGE unpin_user_page
#else
#define NV_PIN_USER_PAGES NV_GET_USER_PAGES
@@ -64,30 +79,36 @@ typedef int vm_fault_t;
 * commit 8e50b8b07f462ab4b91bc1491b1c91bd75e4ad40 which cherry-picked the
 * replacement of the write and force parameters with gup_flags
 *
 * Removed vmas parameter from get_user_pages() by commit 7bbf9c8c99
 * ("mm/gup: remove unused vmas parameter from get_user_pages()")
 * in linux-next, expected in v6.5-rc1 (2023-05-17)
 *
 */

#if defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS)
#define NV_GET_USER_PAGES get_user_pages
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
    get_user_pages(current, current->mm, start, nr_pages, flags, pages, vmas)
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages) \
    get_user_pages(start, nr_pages, flags, pages, NULL)
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages) \
    get_user_pages(current, current->mm, start, nr_pages, flags, pages, NULL)
#else
static inline long NV_GET_USER_PAGES(unsigned long start,
                                     unsigned long nr_pages,
                                     unsigned int flags,
                                     struct page **pages,
                                     struct vm_area_struct **vmas)
                                     struct page **pages)
{
    int write = flags & FOLL_WRITE;
    int force = flags & FOLL_FORCE;

#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE)
    return get_user_pages(start, nr_pages, write, force, pages, vmas);
#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS)
    return get_user_pages(start, nr_pages, write, force, pages, NULL);
#else
    // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE
    // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
    return get_user_pages(current, current->mm, start, nr_pages, write,
                          force, pages, vmas);
#endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE
                          force, pages, NULL);
#endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS
}
#endif // NV_GET_USER_PAGES_HAS_ARGS_FLAGS

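/*
 * Editor's illustrative sketch (not part of the change): pinning user pages
 * through the wrapper, holding mmap_lock as the gup family requires. The
 * helper name is hypothetical; mmap_read_lock() assumes a v5.8+ kernel.
 */
static long nv_example_pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
                                       struct page **pages)
{
    long pinned;

    mmap_read_lock(current->mm);
    /* Expands to the pin_user_pages()/get_user_pages() variant detected by conftest. */
    pinned = NV_PIN_USER_PAGES(uaddr, nr_pages, FOLL_WRITE, pages);
    mmap_read_unlock(current->mm);
    return pinned;
}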
@@ -100,15 +121,22 @@ typedef int vm_fault_t;
 * 64019a2e467a ("mm/gup: remove task_struct pointer for all gup code")
 * in v5.9-rc1 (2020-08-11).
 *
 * Removed unused vmas parameter from pin_user_pages_remote() by commit
 * 83bcc2e132 ("mm/gup: remove unused vmas parameter from pin_user_pages_remote()")
 * in linux-next, expected in v6.5-rc1 (2023-05-14)
 *
 */

#if defined(NV_PIN_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK)
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
#if defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS)
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
    pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL, locked)
#elif defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS)
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
    pin_user_pages_remote(mm, start, nr_pages, flags, pages, NULL, locked)
#else
#define NV_PIN_USER_PAGES_REMOTE pin_user_pages_remote
#endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK
#endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS
#else
#define NV_PIN_USER_PAGES_REMOTE NV_GET_USER_PAGES_REMOTE
#endif // NV_PIN_USER_PAGES_REMOTE_PRESENT
@@ -135,57 +163,63 @@ typedef int vm_fault_t;
 * commit 64019a2e467a ("mm/gup: remove task_struct pointer for
 * all gup code") in v5.9-rc1 (2020-08-11).
 *
 * Removed vmas parameter from get_user_pages_remote() by commit a4bde14d549
 * ("mm/gup: remove vmas parameter from get_user_pages_remote()")
 * in linux-next, expected in v6.5-rc1 (2023-05-14)
 *
 */

#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED)
#define NV_GET_USER_PAGES_REMOTE get_user_pages_remote

#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
    get_user_pages_remote(mm, start, nr_pages, flags, pages, NULL, locked)

#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas)
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
    get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL, locked)

#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
    get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL)

#else
    // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE
    // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS
static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long nr_pages,
                                            unsigned int flags,
                                            struct page **pages,
                                            struct vm_area_struct **vmas,
                                            int *locked)
{
    int write = flags & FOLL_WRITE;
    int force = flags & FOLL_FORCE;

    return get_user_pages_remote(NULL, mm, start, nr_pages, write, force,
                                 pages, vmas);
                                 pages, NULL);
}
#endif // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED
#else
#if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE)
#if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS)
static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long nr_pages,
                                            unsigned int flags,
                                            struct page **pages,
                                            struct vm_area_struct **vmas,
                                            int *locked)
{
    int write = flags & FOLL_WRITE;
    int force = flags & FOLL_FORCE;

    return get_user_pages(NULL, mm, start, nr_pages, write, force, pages, vmas);
    return get_user_pages(NULL, mm, start, nr_pages, write, force, pages, NULL);
}

#else
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    get_user_pages(NULL, mm, start, nr_pages, flags, pages, vmas)
#endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
    get_user_pages(NULL, mm, start, nr_pages, flags, pages, NULL)
#endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
#endif // NV_GET_USER_PAGES_REMOTE_PRESENT

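/*
 * Editor's illustrative sketch (not part of the change): the post-change
 * 6-argument remote wrapper; the caller must hold mm's mmap_lock.
 */
static long nv_example_pin_remote(struct mm_struct *mm, unsigned long start,
                                  unsigned long nr_pages, struct page **pages)
{
    int locked = 1;

    /* The removed vmas argument is supplied as NULL by the macro where needed. */
    return NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, FOLL_WRITE, pages, &locked);
}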
/*
@@ -261,4 +295,22 @@ static inline struct rw_semaphore *nv_mmap_get_lock(struct mm_struct *mm)
#endif
}

static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
{
#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
    vm_flags_set(vma, flags);
#else
    vma->vm_flags |= flags;
#endif
}

static inline void nv_vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
{
#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
    vm_flags_clear(vma, flags);
#else
    vma->vm_flags &= ~flags;
#endif
}

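/*
 * Editor's illustrative sketch (not part of the change): an mmap handler
 * applying typical device-mapping flags through the compat wrappers, which
 * work both before and after vm_flags became const (v6.3).
 */
static int nv_example_mmap(struct file *file, struct vm_area_struct *vma)
{
    nv_vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
    return 0;
}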
#endif // __NV_MM_H__

@@ -27,6 +27,9 @@
#include <linux/pci.h>
#include "nv-linux.h"

#define NV_GPU_BAR1 1
#define NV_GPU_BAR3 3

int nv_pci_register_driver(void);
void nv_pci_unregister_driver(void);
int nv_pci_count_devices(void);

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -60,6 +60,7 @@ static inline pgprot_t pgprot_modify_writecombine(pgprot_t old_prot)
#endif /* !defined(NV_VMWARE) */

#if defined(NVCPU_AARCH64)
extern NvBool nvos_is_chipset_io_coherent(void);
/*
 * Don't rely on the kernel's definition of pgprot_noncached(), as on 64-bit
 * ARM that's not for system memory, but device memory instead. For I/O cache
@@ -119,6 +120,13 @@ static inline pgprot_t pgprot_modify_writecombine(pgprot_t old_prot)
#define NV_PGPROT_WRITE_COMBINED(old_prot) old_prot
#define NV_PGPROT_READ_ONLY(old_prot) \
    __pgprot(pgprot_val((old_prot)) & ~NV_PAGE_RW)
#elif defined(NVCPU_RISCV64)
#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \
    pgprot_writecombine(old_prot)
/* Don't attempt to mark sysmem pages as write combined on riscv */
#define NV_PGPROT_WRITE_COMBINED(old_prot) old_prot
#define NV_PGPROT_READ_ONLY(old_prot) \
    __pgprot(pgprot_val((old_prot)) & ~_PAGE_WRITE)
#else
/* Writecombine is not supported */
#undef NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot)

@@ -92,6 +92,24 @@ typedef struct file_operations nv_proc_ops_t;
#endif

#define NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock)   \
    static ssize_t nv_procfs_read_lock_##name(            \
        struct file *file,                                \
        char __user *buf,                                 \
        size_t size,                                      \
        loff_t *ppos                                      \
    )                                                     \
    {                                                     \
        int ret;                                          \
        ret = nv_down_read_interruptible(&lock);          \
        if (ret < 0)                                      \
        {                                                 \
            return ret;                                   \
        }                                                 \
        size = seq_read(file, buf, size, ppos);           \
        up_read(&lock);                                   \
        return size;                                      \
    }                                                     \
                                                          \
    static int nv_procfs_open_##name(                     \
        struct inode *inode,                              \
        struct file *filep                                \
@@ -104,11 +122,6 @@ typedef struct file_operations nv_proc_ops_t;
        {                                                 \
            return ret;                                   \
        }                                                 \
        ret = nv_down_read_interruptible(&lock);          \
        if (ret < 0)                                      \
        {                                                 \
            single_release(inode, filep);                 \
        }                                                 \
        return ret;                                       \
    }                                                     \
                                                          \
@@ -117,7 +130,6 @@ typedef struct file_operations nv_proc_ops_t;
        struct file *filep                                \
    )                                                     \
    {                                                     \
        up_read(&lock);                                   \
        return single_release(inode, filep);              \
    }

@@ -127,46 +139,7 @@ typedef struct file_operations nv_proc_ops_t;
    static const nv_proc_ops_t nv_procfs_##name##_fops = { \
        NV_PROC_OPS_SET_OWNER()                            \
        .NV_PROC_OPS_OPEN    = nv_procfs_open_##name,      \
        .NV_PROC_OPS_READ    = seq_read,                   \
        .NV_PROC_OPS_LSEEK   = seq_lseek,                  \
        .NV_PROC_OPS_RELEASE = nv_procfs_release_##name,   \
    };


#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_WRITE(name, lock, \
                                                write_callback) \
    NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock)        \
                                                           \
    static ssize_t nv_procfs_write_##name(                 \
        struct file *file,                                 \
        const char __user *buf,                            \
        size_t size,                                       \
        loff_t *ppos                                       \
    )                                                      \
    {                                                      \
        ssize_t ret;                                       \
        struct seq_file *s;                                \
                                                           \
        s = file->private_data;                            \
        if (s == NULL)                                     \
        {                                                  \
            return -EIO;                                   \
        }                                                  \
                                                           \
        ret = write_callback(s, buf + *ppos, size - *ppos); \
        if (ret == 0)                                      \
        {                                                  \
            /* avoid infinite loop */                      \
            ret = -EIO;                                    \
        }                                                  \
        return ret;                                        \
    }                                                      \
                                                           \
    static const nv_proc_ops_t nv_procfs_##name##_fops = { \
        NV_PROC_OPS_SET_OWNER()                            \
        .NV_PROC_OPS_OPEN    = nv_procfs_open_##name,      \
        .NV_PROC_OPS_READ    = seq_read,                   \
        .NV_PROC_OPS_WRITE   = nv_procfs_write_##name,     \
        .NV_PROC_OPS_READ    = nv_procfs_read_lock_##name, \
        .NV_PROC_OPS_LSEEK   = seq_lseek,                  \
        .NV_PROC_OPS_RELEASE = nv_procfs_release_##name,   \
    };

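/*
 * Editor's illustrative usage (not part of the change): instantiating a
 * read/write procfs node from the macro above. All names here are
 * hypothetical, including the write callback the helper expects.
 */
static struct rw_semaphore nv_example_lock;
static ssize_t nv_example_write_cb(struct seq_file *s, const char __user *buf, size_t count);

/* Generates nv_procfs_open/read_lock/write/release_example and its fops. */
NV_DEFINE_SINGLE_PROCFS_FILE_READ_WRITE(example, nv_example_lock, nv_example_write_cb)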
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,10 +25,8 @@
#define _NV_PROTO_H_

#include "nv-pci.h"
#include "nv-register-module.h"

extern const char *nv_device_name;
extern nvidia_module_t nv_fops;

void nv_acpi_register_notifier (nv_linux_state_t *);
void nv_acpi_unregister_notifier (nv_linux_state_t *);
@@ -86,8 +84,11 @@ void nv_shutdown_adapter(nvidia_stack_t *, nv_state_t *, nv_linux_state
void nv_dev_free_stacks(nv_linux_state_t *);
NvBool nv_lock_init_locks(nvidia_stack_t *, nv_state_t *);
void nv_lock_destroy_locks(nvidia_stack_t *, nv_state_t *);
void nv_linux_add_device_locked(nv_linux_state_t *);
int nv_linux_add_device_locked(nv_linux_state_t *);
void nv_linux_remove_device_locked(nv_linux_state_t *);
NvBool nv_acpi_power_resource_method_present(struct pci_dev *);

int nv_linux_init_open_q(nv_linux_state_t *);
void nv_linux_stop_open_q(nv_linux_state_t *);

#endif /* _NV_PROTO_H_ */

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -42,6 +42,7 @@
#include <nv-caps.h>
#include <nv-firmware.h>
#include <nv-ioctl.h>
#include <nv-ioctl-numa.h>
#include <nvmisc.h>

extern nv_cap_t *nvidia_caps_root;
@@ -50,9 +51,6 @@ extern const NvBool nv_is_rm_firmware_supported_os;

#include <nv-kernel-interface-api.h>

/* NVIDIA's reserved major character device number (Linux). */
#define NV_MAJOR_DEVICE_NUMBER 195

#define GPU_UUID_LEN (16)

/*
@@ -223,7 +221,6 @@ typedef struct
#define NV_RM_PAGE_MASK (NV_RM_PAGE_SIZE - 1)

#define NV_RM_TO_OS_PAGE_SHIFT  (os_page_shift - NV_RM_PAGE_SHIFT)
#define NV_RM_PAGES_PER_OS_PAGE (1U << NV_RM_TO_OS_PAGE_SHIFT)
#define NV_RM_PAGES_TO_OS_PAGES(count) \
    ((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \
     ((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 1 : 0))
@@ -315,6 +312,7 @@ typedef enum
    NV_SOC_IRQ_DPAUX_TYPE,
    NV_SOC_IRQ_GPIO_TYPE,
    NV_SOC_IRQ_HDACODEC_TYPE,
    NV_SOC_IRQ_TCPC2DISP_TYPE,
    NV_SOC_IRQ_INVALID_TYPE
} nv_soc_irq_type_t;

@@ -329,6 +327,7 @@ typedef struct nv_soc_irq_info_s {
        NvU32 gpio_num;
        NvU32 dpaux_instance;
    } irq_data;
    NvS32 ref_count;
} nv_soc_irq_info_t;

#define NV_MAX_SOC_IRQS 6
@@ -345,6 +344,12 @@ typedef struct nv_soc_irq_info_s {
/* DMA-capable device data, defined by kernel interface layer */
typedef struct nv_dma_device nv_dma_device_t;

typedef struct nv_phys_addr_range
{
    NvU64 addr;
    NvU64 len;
} nv_phys_addr_range_t;

typedef struct nv_state_t
{
    void *priv; /* private data */
@@ -384,9 +389,11 @@ typedef struct nv_state_t
    NvS32 current_soc_irq;
    NvU32 num_soc_irqs;
    NvU32 hdacodec_irq;
    NvU32 tcpc2disp_irq;
    NvU8 *soc_dcb_blob;
    NvU32 soc_dcb_size;
    NvU32 disp_sw_soc_chip_id;
    NvBool soc_is_dpalt_mode_supported;

    NvU32 igpu_stall_irq[NV_IGPU_MAX_STALL_IRQS];
    NvU32 igpu_nonstall_irq;
@@ -459,20 +466,33 @@ typedef struct nv_state_t
        NvHandle hDisp;
    } rmapi;

    /* Bool to check if ISO iommu enabled */
    NvBool iso_iommu_present;

    /* Bool to check if dma-buf is supported */
    NvBool dma_buf_supported;

    NvBool printed_openrm_enable_unsupported_gpus_error;

    /* Check if NVPCF DSM function is implemented under NVPCF or GPU device scope */
    NvBool nvpcf_dsm_in_gpu_scope;

    /* Bool to check if the device received a shutdown notification */
    NvBool is_shutdown;

    /* Bool to check if the GPU has a coherent sysmem link */
    NvBool coherent;

    /*
     * NUMA node ID of the CPU to which the GPU is attached.
     * Holds NUMA_NO_NODE on platforms that don't support NUMA configuration.
     */
    NvS32 cpu_numa_node_id;

    struct {
        /* Bool to check if ISO iommu enabled */
        NvBool iso_iommu_present;
        /* Bool to check if NISO iommu enabled */
        NvBool niso_iommu_present;
        /* Display SMMU Stream IDs */
        NvU32 dispIsoStreamId;
        NvU32 dispNisoStreamId;
    } iommus;
} nv_state_t;

// These defines need to be in sync with defines in system.h
@@ -490,17 +510,26 @@ struct nv_file_private_t
    NvHandle *handles;
    NvU16 maxHandles;
    NvU32 deviceInstance;
    NvU32 gpuInstanceId;
    NvU8 metadata[64];

    nv_file_private_t *ctl_nvfp;
    void *ctl_nvfp_priv;
    NvU32 register_or_refcount;

    //
    // True if a client or an event was ever allocated on this fd.
    // If false, RMAPI cleanup is skipped.
    //
    NvBool bCleanupRmapi;
};

// Forward define the gpu ops structures
typedef struct gpuSession *nvgpuSessionHandle_t;
typedef struct gpuDevice *nvgpuDeviceHandle_t;
typedef struct gpuAddressSpace *nvgpuAddressSpaceHandle_t;
typedef struct gpuTsg *nvgpuTsgHandle_t;
typedef struct UvmGpuTsgAllocParams_tag nvgpuTsgAllocParams_t;
typedef struct gpuChannel *nvgpuChannelHandle_t;
typedef struct UvmGpuChannelInfo_tag *nvgpuChannelInfo_t;
typedef struct UvmGpuChannelAllocParams_tag nvgpuChannelAllocParams_t;
@@ -527,7 +556,7 @@ typedef struct UvmGpuPagingChannelAllocParams_tag nvgpuPagingChannelAllocPara
typedef struct UvmGpuPagingChannel_tag *nvgpuPagingChannelHandle_t;
typedef struct UvmGpuPagingChannelInfo_tag *nvgpuPagingChannelInfo_t;
typedef enum UvmPmaGpuMemoryType_tag nvgpuGpuMemoryType_t;
typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU32, NvU64 *, NvU32, NvU64, NvU64, nvgpuGpuMemoryType_t);
typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU64, NvU64 *, NvU32, NvU64, NvU64, nvgpuGpuMemoryType_t);
typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemoryType_t);

/*
@@ -592,9 +621,19 @@ typedef enum
#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv) \
    (((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)

#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \
    ((nv)->iso_iommu_present)
/*
 * For console setup by EFI GOP, the base address is BAR1.
 * For console setup by VBIOS, the base address is BAR2 + 16MB.
 */
#define NV_IS_CONSOLE_MAPPED(nv, addr) \
    (((addr) == (nv)->bars[NV_GPU_BAR_INDEX_FB].cpu_address) || \
     ((addr) == ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000)))

#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \
    ((nv)->iommus.iso_iommu_present)

#define NV_SOC_IS_NISO_IOMMU_PRESENT(nv) \
    ((nv)->iommus.niso_iommu_present)
/*
 * GPU add/remove events
 */
@@ -649,7 +688,8 @@ static inline NvBool IS_REG_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)

static inline NvBool IS_FB_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
    return ((nv->fb) && (offset >= nv->fb->cpu_address) &&
    return ((nv->fb) && (nv->fb->size != 0) &&
            (offset >= nv->fb->cpu_address) &&
            ((offset + (length - 1)) >= offset) &&
            ((offset + (length - 1)) <= (nv->fb->cpu_address + (nv->fb->size - 1))));
}

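/*
 * Editor's worked example (annotation, not from the change): with
 * cpu_address = 0x1000 and size = 0x1000 the valid range is [0x1000, 0x1FFF].
 * offset = 0x1800, length = 0x800 ends at 0x1FFF and passes; length = 0x900
 * ends at 0x20FF and fails the last check; a length large enough to wrap
 * NvU64 is rejected by the ((offset + (length - 1)) >= offset) guard, and the
 * new (nv->fb->size != 0) test keeps an empty FB from matching offset == cpu_address.
 */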
@@ -739,7 +779,7 @@ nv_state_t* NV_API_CALL nv_get_ctl_state (void);
void       NV_API_CALL nv_set_dma_address_size   (nv_state_t *, NvU32 );

NV_STATUS  NV_API_CALL nv_alias_pages            (nv_state_t *, NvU32, NvU32, NvU32, NvU64, NvU64 *, void **);
NV_STATUS  NV_API_CALL nv_alloc_pages            (nv_state_t *, NvU32, NvBool, NvU32, NvBool, NvBool, NvU64 *, void **);
NV_STATUS  NV_API_CALL nv_alloc_pages            (nv_state_t *, NvU32, NvU64, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **);
NV_STATUS  NV_API_CALL nv_free_pages             (nv_state_t *, NvU32, NvBool, NvU32, void *);

NV_STATUS  NV_API_CALL nv_register_user_pages    (nv_state_t *, NvU64, NvU64 *, void *, void **);
@@ -756,8 +796,6 @@ NV_STATUS NV_API_CALL nv_register_phys_pages (nv_state_t *, NvU64 *, NvU64,
void       NV_API_CALL nv_unregister_phys_pages  (nv_state_t *, void *);

NV_STATUS  NV_API_CALL nv_dma_map_sgt            (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **);
NV_STATUS  NV_API_CALL nv_dma_map_pages          (nv_dma_device_t *, NvU64, NvU64 *, NvBool, NvU32, void **);
NV_STATUS  NV_API_CALL nv_dma_unmap_pages        (nv_dma_device_t *, NvU64, NvU64 *, void **);

NV_STATUS  NV_API_CALL nv_dma_map_alloc          (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **);
NV_STATUS  NV_API_CALL nv_dma_unmap_alloc        (nv_dma_device_t *, NvU64, NvU64 *, void **);
@@ -807,7 +845,8 @@ void NV_API_CALL nv_put_firmware(const void *);
nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **);
void               NV_API_CALL nv_put_file_private(void *);

NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *);

NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**);
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode);
@@ -847,15 +886,17 @@ NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *);
NvU32 NV_API_CALL nv_get_os_type(void);

void  NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end);
void  NV_API_CALL nv_get_screen_info(nv_state_t *, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU32 *, NvU64 *);

struct dma_buf;
typedef struct nv_dma_buf nv_dma_buf_t;
struct drm_gem_object;

NV_STATUS NV_API_CALL nv_dma_import_sgt  (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *);
void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *);
NV_STATUS NV_API_CALL nv_dma_import_dma_buf      (nv_dma_device_t *, struct dma_buf *, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **);
NV_STATUS NV_API_CALL nv_dma_import_from_fd      (nv_dma_device_t *, NvS32, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **);
void      NV_API_CALL nv_dma_release_dma_buf     (void *, nv_dma_buf_t *);
NV_STATUS NV_API_CALL nv_dma_import_dma_buf      (nv_dma_device_t *, struct dma_buf *, NvU32 *, struct sg_table **, nv_dma_buf_t **);
NV_STATUS NV_API_CALL nv_dma_import_from_fd      (nv_dma_device_t *, NvS32, NvU32 *, struct sg_table **, nv_dma_buf_t **);
void      NV_API_CALL nv_dma_release_dma_buf     (nv_dma_buf_t *);

void NV_API_CALL nv_schedule_uvm_isr             (nv_state_t *);

@@ -871,6 +912,8 @@ typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *)
NV_STATUS NV_API_CALL nv_get_num_phys_pages      (void *, NvU32 *);
NV_STATUS NV_API_CALL nv_get_phys_pages          (void *, void *, NvU32 *);

void NV_API_CALL nv_get_disp_smmu_stream_ids     (nv_state_t *, NvU32 *, NvU32 *);

/*
 * ---------------------------------------------------------------------------
 *
@@ -897,6 +940,7 @@ NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *
NvBool     NV_API_CALL rm_isr                  (nvidia_stack_t *, nv_state_t *, NvU32 *);
void       NV_API_CALL rm_isr_bh               (nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL rm_isr_bh_unlocked      (nvidia_stack_t *, nv_state_t *);
NvBool     NV_API_CALL rm_is_msix_allowed      (nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL rm_power_management     (nvidia_stack_t *, nv_state_t *, nv_pm_action_t);
NV_STATUS  NV_API_CALL rm_stop_user_channels   (nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL rm_restart_user_channels(nvidia_stack_t *, nv_state_t *);
@@ -915,6 +959,8 @@ NV_STATUS NV_API_CALL rm_write_registry_string (nvidia_stack_t *, nv_state_t *
void       NV_API_CALL rm_parse_option_string  (nvidia_stack_t *, const char *);
char*      NV_API_CALL rm_remove_spaces        (const char *);
char*      NV_API_CALL rm_string_token         (char **, const char);
void       NV_API_CALL rm_vgpu_vfio_set_driver_vm(nvidia_stack_t *, NvBool);
NV_STATUS  NV_API_CALL rm_get_adapter_status_external(nvidia_stack_t *, nv_state_t *);

NV_STATUS  NV_API_CALL rm_run_rc_callback      (nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL rm_execute_work_item    (nvidia_stack_t *, void *);
@@ -937,6 +983,8 @@ NV_STATUS NV_API_CALL rm_perform_version_check (nvidia_stack_t *, void *, NvU

void       NV_API_CALL rm_power_source_change_event        (nvidia_stack_t *, NvU32);

void       NV_API_CALL rm_request_dnotifier_state          (nvidia_stack_t *, nv_state_t *);

void       NV_API_CALL rm_disable_gpu_state_persistence    (nvidia_stack_t *sp, nv_state_t *);
NV_STATUS  NV_API_CALL rm_p2p_init_mapping       (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *);
NV_STATUS  NV_API_CALL rm_p2p_destroy_mapping    (nvidia_stack_t *, NvU64);
@@ -946,25 +994,24 @@ NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, N
NV_STATUS  NV_API_CALL rm_p2p_register_callback  (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *);
NV_STATUS  NV_API_CALL rm_p2p_put_pages          (nvidia_stack_t *, NvU64, NvU32, NvU64, void *);
NV_STATUS  NV_API_CALL rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *);
NV_STATUS  NV_API_CALL rm_p2p_dma_map_pages      (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU32, NvU32, NvU64 *, void **);
NV_STATUS  NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *);
NV_STATUS  NV_API_CALL rm_p2p_dma_map_pages      (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU64, NvU32, NvU64 *, void **);
NV_STATUS  NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **);
void       NV_API_CALL rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle);
NV_STATUS  NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, NvU64 *);
NV_STATUS  NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64);
NV_STATUS  NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **);
NV_STATUS  NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, void *, nv_phys_addr_range_t **, NvU32 *);
void       NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, nv_phys_addr_range_t **, NvU32);
NV_STATUS  NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **, NvBool *);
void       NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *);
NV_STATUS  NV_API_CALL rm_log_gpu_crash          (nv_stack_t *, nv_state_t *);

void       NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
NvBool     NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id);
NV_STATUS  NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
NV_STATUS  NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(nvidia_stack_t *, nv_state_t *, NvU32 *);
NV_STATUS  NV_API_CALL rm_gpu_handle_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
NvBool     NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *);
NvBool     NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *);
NvBool     NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, NvS32 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *);
NV_STATUS  NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, nv_ioctl_numa_info_t *);
NV_STATUS  NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *);
NvBool     NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *);
@@ -979,26 +1026,27 @@ void NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_
void       NV_API_CALL rm_enable_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
void       NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
NV_STATUS  NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool);
NV_STATUS  NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool, NvBool *);
const char* NV_API_CALL rm_get_vidmem_power_status(nvidia_stack_t *, nv_state_t *);
const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *, nv_state_t *);
const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool);

void       NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32);
void       NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);

NvBool     NV_API_CALL rm_is_altstack_in_use(void);

/* vGPU VFIO specific functions */
NV_STATUS  NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32, NvBool *);
NV_STATUS  NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
NV_STATUS  NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS  NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS  NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
NV_STATUS  NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *);
NV_STATUS  NV_API_CALL nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32);
NV_STATUS  NV_API_CALL nv_vgpu_get_sparse_mmap(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *);
NV_STATUS  NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *,
                                            NvU64 *, NvU64 *, NvU32 *, NvBool *, NvU8 *);
NV_STATUS  NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *);
NV_STATUS  NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
NV_STATUS  NV_API_CALL nv_vgpu_update_request(nvidia_stack_t *, const NvU8 *, NvU32, NvU64 *, NvU64 *, const char *);
NV_STATUS  NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *);
NV_STATUS  NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *, NvU32, NvBool *);
NV_STATUS  NV_API_CALL nv_gpu_unbind_event(nvidia_stack_t *, NvU32, NvBool *);

NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*);
nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*);

@@ -86,7 +86,7 @@
/* Not currently implemented for MSVC/ARM64. See bug 3366890. */
# define nv_speculation_barrier()
# define speculation_barrier() nv_speculation_barrier()
#elif defined(NVCPU_NVRISCV64) && NVOS_IS_LIBOS
#elif defined(NVCPU_IS_RISCV64)
# define nv_speculation_barrier()
#else
#error "Unknown compiler/chip family"

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -62,10 +62,10 @@ typedef struct
/*******************************************************************************
    nvUvmInterfaceRegisterGpu

    Registers the GPU with the provided UUID for use. A GPU must be registered
    before its UUID can be used with any other API. This call is ref-counted so
    every nvUvmInterfaceRegisterGpu must be paired with a corresponding
    nvUvmInterfaceUnregisterGpu.
    Registers the GPU with the provided physical UUID for use. A GPU must be
    registered before its UUID can be used with any other API. This call is
    ref-counted so every nvUvmInterfaceRegisterGpu must be paired with a
    corresponding nvUvmInterfaceUnregisterGpu.

    You don't need to call nvUvmInterfaceSessionCreate before calling this.

@@ -79,12 +79,13 @@ NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatfo
/*******************************************************************************
    nvUvmInterfaceUnregisterGpu

    Unregisters the GPU with the provided UUID. This drops the ref count from
    nvUvmInterfaceRegisterGpu. Once the reference count goes to 0 the device may
    no longer be accessible until the next nvUvmInterfaceRegisterGpu call. No
    automatic resource freeing is performed, so only make the last unregister
    call after destroying all your allocations associated with that UUID (such
    as those from nvUvmInterfaceAddressSpaceCreate).
    Unregisters the GPU with the provided physical UUID. This drops the ref
    count from nvUvmInterfaceRegisterGpu. Once the reference count goes to 0
    the device may no longer be accessible until the next
    nvUvmInterfaceRegisterGpu call. No automatic resource freeing is performed,
    so only make the last unregister call after destroying all your allocations
    associated with that UUID (such as those from
    nvUvmInterfaceAddressSpaceCreate).

    If the UUID is not found, no operation is performed.
*/

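/*
 * Editor's illustrative sketch (not part of the change): the documented
 * register/unregister pairing. The nvUvmInterfaceUnregisterGpu signature is
 * assumed from the description above.
 */
static NV_STATUS nv_example_with_gpu(const NvProcessorUuid *gpuUuid,
                                     UvmGpuPlatformInfo *platformInfo)
{
    NV_STATUS status = nvUvmInterfaceRegisterGpu(gpuUuid, platformInfo);
    if (status != NV_OK)
        return status;

    /* ... use other nvUvmInterface* APIs against this physical UUID ... */

    /* Drop the reference; the last unregister releases the device. */
    nvUvmInterfaceUnregisterGpu(gpuUuid);
    return NV_OK;
}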
@@ -121,10 +122,10 @@ NV_STATUS nvUvmInterfaceSessionDestroy(uvmGpuSessionHandle session);
|
||||
nvUvmInterfaceDeviceCreate
|
||||
|
||||
Creates a device object under the given session for the GPU with the given
|
||||
UUID. Also creates a partition object for the device iff bCreateSmcPartition
|
||||
is true and pGpuInfo->smcEnabled is true. pGpuInfo->smcUserClientInfo will
|
||||
be used to determine the SMC partition in this case. A device handle is
|
||||
returned in the device output parameter.
|
||||
physical UUID. Also creates a partition object for the device iff
|
||||
bCreateSmcPartition is true and pGpuInfo->smcEnabled is true.
|
||||
pGpuInfo->smcUserClientInfo will be used to determine the SMC partition in
|
||||
this case. A device handle is returned in the device output parameter.
|
||||
|
||||
Error codes:
|
||||
NV_ERR_GENERIC
|
||||
@@ -161,6 +162,7 @@ void nvUvmInterfaceDeviceDestroy(uvmGpuDeviceHandle device);
|
||||
NV_STATUS nvUvmInterfaceAddressSpaceCreate(uvmGpuDeviceHandle device,
|
||||
unsigned long long vaBase,
|
||||
unsigned long long vaSize,
|
||||
NvBool enableAts,
|
||||
uvmGpuAddressSpaceHandle *vaSpace,
|
||||
UvmGpuAddressSpaceInfo *vaSpaceInfo);
|
||||
|
||||
@@ -327,7 +329,7 @@ NV_STATUS nvUvmInterfaceGetPmaObject(uvmGpuDeviceHandle device,
|
||||
|
||||
// Mirrors pmaEvictPagesCb_t, see its documentation in pma.h.
|
||||
typedef NV_STATUS (*uvmPmaEvictPagesCallback)(void *callbackData,
|
||||
NvU32 pageSize,
|
||||
NvU64 pageSize,
|
||||
NvU64 *pPages,
|
||||
NvU32 count,
|
||||
NvU64 physBegin,
|
||||
@@ -390,7 +392,7 @@ void nvUvmInterfacePmaUnregisterEvictionCallbacks(void *pPma);
|
||||
*/
|
||||
NV_STATUS nvUvmInterfacePmaAllocPages(void *pPma,
|
||||
NvLength pageCount,
|
||||
NvU32 pageSize,
|
||||
NvU64 pageSize,
|
||||
UvmPmaAllocationOptions *pPmaAllocOptions,
|
||||
NvU64 *pPages);
|
||||
|
||||
@@ -419,36 +421,9 @@ NV_STATUS nvUvmInterfacePmaAllocPages(void *pPma,
|
||||
NV_STATUS nvUvmInterfacePmaPinPages(void *pPma,
|
||||
NvU64 *pPages,
|
||||
NvLength pageCount,
|
||||
NvU32 pageSize,
|
||||
NvU64 pageSize,
|
||||
NvU32 flags);
|
||||
|
||||
/*******************************************************************************
|
||||
nvUvmInterfacePmaUnpinPages
|
||||
|
||||
This function will unpin the physical memory allocated using PMA. The pages
|
||||
passed as input must be already pinned, else this function will return an
|
||||
error and rollback any change if any page is not previously marked "pinned".
|
||||
Behaviour is undefined if any blacklisted pages are unpinned.
|
||||
|
||||
Arguments:
|
||||
pPma[IN] - Pointer to PMA object.
|
||||
pPages[IN] - Array of pointers, containing the PA base
|
||||
address of each page to be unpinned.
|
||||
pageCount [IN] - Number of pages required to be unpinned.
|
||||
pageSize [IN] - Page size of each page to be unpinned.
|
||||
|
||||
Error codes:
|
||||
NV_ERR_INVALID_ARGUMENT - Invalid input arguments.
|
||||
NV_ERR_GENERIC - Unexpected error. We try hard to avoid
|
||||
returning this error code as is not very
|
||||
informative.
|
||||
NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB
|
||||
*/
|
||||
NV_STATUS nvUvmInterfacePmaUnpinPages(void *pPma,
|
||||
NvU64 *pPages,
|
||||
NvLength pageCount,
|
||||
NvU32 pageSize);
|
||||
|
||||
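As a reading aid for the hunks above, which widen pageSize to NvU64 and drop the unpin entry point, here is a minimal, hypothetical allocation sketch against the post-change signatures; the 64KB page size, page count, and zeroed options are illustrative assumptions, not values mandated by the interface.

// Hypothetical PMA usage sketch: allocate four 64KB pages, pin them, then
// release them. pPma is assumed to come from nvUvmInterfaceGetPmaObject().
static NV_STATUS pmaAllocExample(void *pPma)
{
    NvU64 pages[4];
    UvmPmaAllocationOptions options = {0};
    const NvU64 pageSize = 0x10000; // 64KB, illustrative
    NV_STATUS status;

    status = nvUvmInterfacePmaAllocPages(pPma, 4, pageSize, &options, pages);
    if (status != NV_OK)
        return status;

    status = nvUvmInterfacePmaPinPages(pPma, pages, 4, pageSize, 0);

    // Free unconditionally in this sketch; real callers would keep the
    // pages while they are in use.
    nvUvmInterfacePmaFreePages(pPma, pages, 4, pageSize, 0);
    return status;
}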
/*******************************************************************************
nvUvmInterfaceMemoryFree

@@ -488,7 +463,7 @@ void nvUvmInterfaceMemoryFree(uvmGpuAddressSpaceHandle vaSpace,
void nvUvmInterfacePmaFreePages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU32 pageSize,
NvU64 pageSize,
NvU32 flags);

/*******************************************************************************
@@ -507,7 +482,7 @@ void nvUvmInterfacePmaFreePages(void *pPma,
NV_STATUS nvUvmInterfaceMemoryCpuMap(uvmGpuAddressSpaceHandle vaSpace,
UvmGpuPointer gpuPointer,
NvLength length, void **cpuPtr,
NvU32 pageSize);
NvU64 pageSize);

/*******************************************************************************
uvmGpuMemoryCpuUnmap
@@ -517,16 +492,59 @@ NV_STATUS nvUvmInterfaceMemoryCpuMap(uvmGpuAddressSpaceHandle vaSpace,
void nvUvmInterfaceMemoryCpuUnMap(uvmGpuAddressSpaceHandle vaSpace,
void *cpuPtr);

/*******************************************************************************
nvUvmInterfaceTsgAllocate

This function allocates a Time-Slice Group (TSG).

allocParams must contain an engineIndex as TSGs need to be bound to an
engine type at allocation time. The possible values are [0,
UVM_COPY_ENGINE_COUNT_MAX) for CE engine type. Notably only the copy engines
that have UvmGpuCopyEngineCaps::supported set to true can be allocated.

Note that TSG is not supported on all GPU architectures for all engine
types, e.g., pre-Volta GPUs only support TSG for the GR/Compute engine type.
On devices that do not support HW TSGs on the requested engine, this API is
still required, i.e., a TSG handle is required in
nvUvmInterfaceChannelAllocate(), due to information stored in it necessary
for channel allocation. However, when HW TSGs aren't supported, a TSG handle
is essentially a "fake" TSG with no HW scheduling impact.

tsg is filled with the address of the corresponding TSG handle.

Arguments:
vaSpace[IN] - VA space linked to a client and a device under which
the TSG is allocated.
allocParams[IN] - structure with allocation settings.
tsg[OUT] - pointer to the new TSG handle.

Error codes:
NV_ERR_GENERIC
NV_ERR_INVALID_ARGUMENT
NV_ERR_NO_MEMORY
NV_ERR_NOT_SUPPORTED
*/
NV_STATUS nvUvmInterfaceTsgAllocate(uvmGpuAddressSpaceHandle vaSpace,
const UvmGpuTsgAllocParams *allocParams,
uvmGpuTsgHandle *tsg);

/*******************************************************************************
nvUvmInterfaceTsgDestroy

This function destroys a given TSG.

Arguments:
tsg[IN] - Tsg handle
*/
void nvUvmInterfaceTsgDestroy(uvmGpuTsgHandle tsg);

/*******************************************************************************
nvUvmInterfaceChannelAllocate

This function will allocate a channel bound to a copy engine
This function will allocate a channel bound to a copy engine (CE) or a SEC2
engine.

allocParams must contain an engineIndex as channels need to be bound to an
engine type at allocation time. The possible values are [0,
UVM_COPY_ENGINE_COUNT_MAX), but notably only the copy engines that have
UvmGpuCopyEngineCaps::supported set to true can be allocated. This struct
also contains information relative to GPFIFO and GPPut.
allocParams contains information relative to GPFIFO and GPPut.

channel is filled with the address of the corresponding channel handle.

@@ -536,17 +554,18 @@ void nvUvmInterfaceMemoryCpuUnMap(uvmGpuAddressSpaceHandle vaSpace,
Host channel submission doorbell.

Arguments:
vaSpace[IN] - VA space linked to a client and a device under which
the channel will be allocated
tsg[IN] - Time-Slice Group that the channel will be a member of.
allocParams[IN] - structure with allocation settings
channel[OUT] - pointer to the new channel handle
channelInfo[OUT] - structure filled with channel information

Error codes:
NV_ERR_GENERIC
NV_ERR_INVALID_ARGUMENT
NV_ERR_NO_MEMORY
NV_ERR_NOT_SUPPORTED
*/
NV_STATUS nvUvmInterfaceChannelAllocate(uvmGpuAddressSpaceHandle vaSpace,
NV_STATUS nvUvmInterfaceChannelAllocate(const uvmGpuTsgHandle tsg,
const UvmGpuChannelAllocParams *allocParams,
uvmGpuChannelHandle *channel,
UvmGpuChannelInfo *channelInfo);
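The TSG-then-channel ordering this hunk introduces can be summarized with a hedged sketch; the engine index 0 and GPFIFO entry count are illustrative assumptions rather than values mandated by the interface.

static NV_STATUS ceChannelExample(uvmGpuAddressSpaceHandle vaSpace,
                                  uvmGpuChannelHandle *channel,
                                  UvmGpuChannelInfo *channelInfo)
{
    UvmGpuTsgAllocParams tsgParams = {0};
    UvmGpuChannelAllocParams channelParams = {0};
    uvmGpuTsgHandle tsg;
    NV_STATUS status;

    tsgParams.engineType = UVM_GPU_CHANNEL_ENGINE_TYPE_CE;
    tsgParams.engineIndex = 0; // must name a CE with supported == NV_TRUE

    status = nvUvmInterfaceTsgAllocate(vaSpace, &tsgParams, &tsg);
    if (status != NV_OK)
        return status;

    channelParams.numGpFifoEntries = 128; // illustrative
    status = nvUvmInterfaceChannelAllocate(tsg, &channelParams, channel,
                                           channelInfo);
    if (status != NV_OK)
        nvUvmInterfaceTsgDestroy(tsg);
    return status;
}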
@@ -554,7 +573,7 @@ NV_STATUS nvUvmInterfaceChannelAllocate(uvmGpuAddressSpaceHandle vaSpace,
/*******************************************************************************
nvUvmInterfaceChannelDestroy

This function destroys a given channel
This function destroys a given channel.

Arguments:
channel[IN] - channel handle
@@ -573,9 +592,16 @@ void nvUvmInterfaceChannelDestroy(uvmGpuChannelHandle channel);
Error codes:
NV_ERR_GENERIC
NV_ERR_NO_MEMORY
NV_ERR_INVALID_STATE
NV_ERR_NOT_SUPPORTED
NV_ERR_NOT_READY
NV_ERR_INVALID_LOCK_STATE
NV_ERR_INVALID_STATE
NV_ERR_NVSWITCH_FABRIC_NOT_READY
NV_ERR_NVSWITCH_FABRIC_FAILURE
*/
NV_STATUS nvUvmInterfaceQueryCaps(uvmGpuDeviceHandle device,
UvmGpuCaps * caps);
UvmGpuCaps *caps);

/*******************************************************************************
nvUvmInterfaceQueryCopyEnginesCaps
@@ -594,6 +620,8 @@ NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device,
nvUvmInterfaceGetGpuInfo

Return various gpu info, refer to the UvmGpuInfo struct for details.
The input UUID is for the physical GPU and the pGpuClientInfo identifies
the SMC partition if SMC is enabled and the partition exists.
If no gpu matching the uuid is found, an error will be returned.

On Ampere+ GPUs, pGpuClientInfo contains SMC information provided by the
@@ -601,6 +629,9 @@ NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device,

Error codes:
NV_ERR_GENERIC
NV_ERR_NO_MEMORY
NV_ERR_GPU_UUID_NOT_FOUND
NV_ERR_INSUFFICIENT_PERMISSIONS
NV_ERR_INSUFFICIENT_RESOURCES
*/
NV_STATUS nvUvmInterfaceGetGpuInfo(const NvProcessorUuid *gpuUuid,
@@ -813,7 +844,7 @@ NV_STATUS nvUvmInterfaceGetEccInfo(uvmGpuDeviceHandle device,
UVM GPU UNLOCK

Arguments:
gpuUuid[IN] - UUID of the GPU to operate on
device[IN] - Device handle associated with the gpu
bOwnInterrupts - Set to NV_TRUE for UVM to take ownership of the
replayable page fault interrupts. Set to NV_FALSE
to return ownership of the page fault interrupts
@@ -921,6 +952,54 @@ NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo,
void *pFaultBuffer,
NvU32 *numFaults);

/*******************************************************************************
nvUvmInterfaceFlushReplayableFaultBuffer

This function sends an RPC to GSP in order to flush the HW replayable fault buffer.

NOTES:
- This function DOES NOT acquire the RM API or GPU locks. That is because
it is called during fault servicing, which could produce deadlocks.
- This function should not be called when interrupts are disabled.

Arguments:
pFaultInfo[IN] - information provided by RM for fault handling.
used for obtaining the device handle without locks.
bCopyAndFlush[IN] - Instructs RM to perform the flush in the Copy+Flush mode.
In this mode, RM will perform a copy of the packets from
the HW buffer to UVM's SW buffer as part of performing
the flush. This mode gives UVM the opportunity to observe
the packets contained within the HW buffer at the time
of issuing the call.

Error codes:
NV_ERR_INVALID_ARGUMENT
*/
NV_STATUS nvUvmInterfaceFlushReplayableFaultBuffer(UvmGpuFaultInfo *pFaultInfo,
NvBool bCopyAndFlush);
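A brief sketch of the Copy+Flush mode described above; requesting the copy is a policy choice on the caller's side.

// Flush the HW replayable fault buffer, asking RM to copy pending packets
// into UVM's shadow buffer first so every packet present at call time can
// be observed. No RM API or GPU locks are taken, per the NOTES above.
static NV_STATUS flushWithCopyExample(UvmGpuFaultInfo *pFaultInfo)
{
    return nvUvmInterfaceFlushReplayableFaultBuffer(pFaultInfo, NV_TRUE);
}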
/*******************************************************************************
nvUvmInterfaceTogglePrefetchFaults

This function sends an RPC to GSP in order to toggle the prefetch fault PRI.

NOTES:
- This function DOES NOT acquire the RM API or GPU locks. That is because
it is called during fault servicing, which could produce deadlocks.
- This function should not be called when interrupts are disabled.

Arguments:
pFaultInfo[IN] - Information provided by RM for fault handling.
Used for obtaining the device handle without locks.
bEnable[IN] - Instructs RM whether to toggle generating faults on
prefetch on/off.

Error codes:
NV_ERR_INVALID_ARGUMENT
*/
NV_STATUS nvUvmInterfaceTogglePrefetchFaults(UvmGpuFaultInfo *pFaultInfo,
NvBool bEnable);

/*******************************************************************************
nvUvmInterfaceInitAccessCntrInfo

@@ -929,13 +1008,15 @@ NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo,
Arguments:
device[IN] - Device handle associated with the gpu
pAccessCntrInfo[OUT] - Information provided by RM for access counter handling
accessCntrIndex[IN] - Access counter index

Error codes:
NV_ERR_GENERIC
NV_ERR_INVALID_ARGUMENT
*/
NV_STATUS nvUvmInterfaceInitAccessCntrInfo(uvmGpuDeviceHandle device,
UvmGpuAccessCntrInfo *pAccessCntrInfo);
UvmGpuAccessCntrInfo *pAccessCntrInfo,
NvU32 accessCntrIndex);
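Since this hunk adds the accessCntrIndex parameter, a one-line usage sketch may help; index 0 is an assumed choice, and a real caller would keep the returned info for later teardown.

static NV_STATUS accessCntrExample(uvmGpuDeviceHandle device,
                                   UvmGpuAccessCntrInfo *pInfo)
{
    // Initialize the first access counter notification buffer; valid
    // indices are hardware-dependent.
    return nvUvmInterfaceInitAccessCntrInfo(device, pInfo, 0);
}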
/*******************************************************************************
nvUvmInterfaceDestroyAccessCntrInfo
@@ -1024,7 +1105,8 @@ void nvUvmInterfaceDeRegisterUvmOps(void);

Error codes:
NV_ERR_INVALID_ARGUMENT
NV_ERR_OBJECT_NOT_FOUND : If device object associated with the uuids aren't found.
NV_ERR_OBJECT_NOT_FOUND : If device object associated with the device
handles isn't found.
*/
NV_STATUS nvUvmInterfaceP2pObjectCreate(uvmGpuDeviceHandle device1,
uvmGpuDeviceHandle device2,
@@ -1054,11 +1136,13 @@ void nvUvmInterfaceP2pObjectDestroy(uvmGpuSessionHandle session,
hMemory[IN] - Memory handle.
offset [IN] - Offset from the beginning of the allocation
where PTE mappings should begin.
Should be aligned with pagesize associated
Should be aligned with mappingPagesize
in gpuExternalMappingInfo associated
with the allocation.
size [IN] - Length of the allocation for which PTEs
should be built.
Should be aligned with pagesize associated
Should be aligned with mappingPagesize
in gpuExternalMappingInfo associated
with the allocation.
size = 0 will be interpreted as the total size
of the allocation.
@@ -1075,6 +1159,8 @@ void nvUvmInterfaceP2pObjectDestroy(uvmGpuSessionHandle session,
NV_ERR_NOT_READY - Returned when querying the PTEs requires a deferred setup
which has not yet completed. It is expected that the caller
will reattempt the call until a different code is returned.
As an example, multi-node systems which require querying
PTEs from the Fabric Manager may return this code.
*/
NV_STATUS nvUvmInterfaceGetExternalAllocPtes(uvmGpuAddressSpaceHandle vaSpace,
NvHandle hMemory,
@@ -1383,4 +1469,329 @@ NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channe
char *methodStream,
NvU32 methodStreamSize);

/*******************************************************************************
Cryptography Services Library (CSL) Interface
*/

/*******************************************************************************
nvUvmInterfaceCslInitContext

Allocates and initializes a CSL context for a given secure channel.

The lifetime of the context is the same as the lifetime of the secure channel
it is paired with.

Locking: This function acquires an API lock.
Memory : This function dynamically allocates memory.

Arguments:
uvmCslContext[IN/OUT] - The CSL context associated with a channel.
channel[IN] - Handle to a secure channel.

Error codes:
NV_ERR_INVALID_STATE - The system is not operating in Confidential Compute mode.
NV_ERR_INVALID_CHANNEL - The associated channel is not a secure channel.
NV_ERR_IN_USE - The context has already been initialized.
*/
NV_STATUS nvUvmInterfaceCslInitContext(UvmCslContext *uvmCslContext,
uvmGpuChannelHandle channel);
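The context/channel lifetime pairing described above, as a minimal sketch; the assumption is that channel already refers to a secure channel.

static NV_STATUS cslLifetimeExample(uvmGpuChannelHandle channel)
{
    UvmCslContext cslContext;
    NV_STATUS status = nvUvmInterfaceCslInitContext(&cslContext, channel);

    if (status != NV_OK)
        return status; // e.g. NV_ERR_INVALID_STATE outside Confidential Compute

    // ... use cslContext for encrypt/decrypt/sign while the channel lives ...

    nvUvmInterfaceDeinitCslContext(&cslContext);
    return NV_OK;
}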
/*******************************************************************************
nvUvmInterfaceDeinitCslContext

Securely deinitializes and clears the contents of a context.

If the context is already deinitialized then the function returns immediately.

Locking: This function does not acquire an API or GPU lock.
Memory : This function may free memory.

Arguments:
uvmCslContext[IN] - The CSL context associated with a channel.
*/
void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext);

/*******************************************************************************
nvUvmInterfaceCslRotateKey

Disables channels and rotates keys.

This function disables channels and rotates associated keys. The channels
associated with the given CSL contexts must be idled before this function is
called. To trigger key rotation all allocated channels for a given key must
be present in the list. If the function returns successfully then the CSL
contexts have been updated with the new key.

Locking: This function attempts to acquire the GPU lock. In case of failure
to acquire the return code is NV_ERR_STATE_IN_USE. The caller must
guarantee that no CSL function, including this one, is invoked
concurrently with the CSL contexts in contextList.
Memory : This function dynamically allocates memory.

Arguments:
contextList[IN/OUT] - An array of pointers to CSL contexts.
contextListCount[IN] - Number of CSL contexts in contextList. Its value
must be greater than 0.
Error codes:
NV_ERR_INVALID_ARGUMENT - contextList is NULL or contextListCount is 0.
NV_ERR_STATE_IN_USE - Unable to acquire lock / resource. Caller
can retry at a later time.
NV_ERR_GENERIC - A failure other than _STATE_IN_USE occurred
when attempting to acquire a lock.
*/
NV_STATUS nvUvmInterfaceCslRotateKey(UvmCslContext *contextList[],
NvU32 contextListCount);

/*******************************************************************************
nvUvmInterfaceCslRotateIv

Rotates the IV for a given channel and operation.

This function will rotate the IV on both the CPU and the GPU.
For a given operation the channel must be idle before calling this function.
This function can be called regardless of the value of the IV's message counter.

Locking: This function attempts to acquire the GPU lock. In case of failure to
acquire the return code is NV_ERR_STATE_IN_USE. The caller must guarantee
that no CSL function, including this one, is invoked concurrently with
the same CSL context.
Memory : This function does not dynamically allocate memory.

Arguments:
uvmCslContext[IN/OUT] - The CSL context associated with a channel.
operation[IN] - Either
- UVM_CSL_OPERATION_ENCRYPT
- UVM_CSL_OPERATION_DECRYPT

Error codes:
NV_ERR_INSUFFICIENT_RESOURCES - The rotate operation would cause a counter
to overflow.
NV_ERR_STATE_IN_USE - Unable to acquire lock / resource. Caller
can retry at a later time.
NV_ERR_INVALID_ARGUMENT - Invalid value for operation.
NV_ERR_GENERIC - A failure other than _STATE_IN_USE occurred
when attempting to acquire a lock.
*/
NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
UvmCslOperation operation);

/*******************************************************************************
nvUvmInterfaceCslEncrypt

Encrypts data and produces an authentication tag.

Auth, input, and output buffers must not overlap. If they do then calling
this function produces undefined behavior. Performance is typically
maximized when the input and output buffers are 16-byte aligned. This is
the natural alignment for an AES block.
The encryptIV can be obtained from nvUvmInterfaceCslIncrementIv.
However, it is optional. If it is NULL, the next IV in line will be used.

Locking: This function does not acquire an API or GPU lock.
The caller must guarantee that no CSL function, including this one,
is invoked concurrently with the same CSL context.
Memory : This function does not dynamically allocate memory.

Arguments:
uvmCslContext[IN/OUT] - The CSL context associated with a channel.
bufferSize[IN] - Size of the input and output buffers in
units of bytes. Value can range from 1 byte
to (2^32) - 1 bytes.
inputBuffer[IN] - Address of plaintext input buffer.
encryptIv[IN/OUT] - IV to use for encryption. Can be NULL.
outputBuffer[OUT] - Address of ciphertext output buffer.
authTagBuffer[OUT] - Address of authentication tag buffer.
Its size is UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES.

Error codes:
NV_ERR_INVALID_ARGUMENT - The CSL context is not associated with a channel.
- The size of the data is 0 bytes.
- The encryptIv has already been used.
*/
NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
NvU32 bufferSize,
NvU8 const *inputBuffer,
UvmCslIv *encryptIv,
NvU8 *outputBuffer,
NvU8 *authTagBuffer);
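A hedged wrapper showing the call shape documented above, with NULL passed for encryptIv so the next in-line IV is used; buffer management is left to the caller.

// authTag must point at UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES of storage, and
// none of the three buffers may overlap.
static NV_STATUS cslEncryptExample(UvmCslContext *ctx,
                                   NvU32 size,
                                   NvU8 const *plaintext,
                                   NvU8 *ciphertext,
                                   NvU8 *authTag)
{
    return nvUvmInterfaceCslEncrypt(ctx, size, plaintext,
                                    NULL /* use the next IV in line */,
                                    ciphertext, authTag);
}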
/*******************************************************************************
nvUvmInterfaceCslDecrypt

Verifies the authentication tag and decrypts data.

Auth, input, and output buffers must not overlap. If they do then calling
this function produces undefined behavior. Performance is typically
maximized when the input and output buffers are 16-byte aligned. This is
the natural alignment for an AES block.

During a key rotation event the previous key is stored in the CSL context.
This allows data encrypted by the GPU to be decrypted with the previous key.
The keyRotationId parameter identifies which key is used. The first key rotation
ID has a value of 0 and increments by one for each key rotation event.

Locking: This function does not acquire an API or GPU lock.
The caller must guarantee that no CSL function, including this one,
is invoked concurrently with the same CSL context.
Memory : This function does not dynamically allocate memory.

Arguments:
uvmCslContext[IN/OUT] - The CSL context.
bufferSize[IN] - Size of the input and output buffers in units of bytes.
Value can range from 1 byte to (2^32) - 1 bytes.
decryptIv[IN] - IV used to decrypt the ciphertext. Its value can either be given by
nvUvmInterfaceCslIncrementIv, or, if NULL, the CSL context's
internal counter is used.
keyRotationId[IN] - Specifies the key that is used for decryption.
A value of NV_U32_MAX specifies the current key.
inputBuffer[IN] - Address of ciphertext input buffer.
outputBuffer[OUT] - Address of plaintext output buffer.
addAuthData[IN] - Address of the plaintext additional authenticated data used to
calculate the authentication tag. Can be NULL.
addAuthDataSize[IN] - Size of the additional authenticated data in units of bytes.
Value can range from 1 byte to (2^32) - 1 bytes.
This parameter is ignored if addAuthData is NULL.
authTagBuffer[IN] - Address of authentication tag buffer.
Its size is UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES.

Error codes:
NV_ERR_INSUFFICIENT_RESOURCES - The decryption operation would cause a
counter overflow to occur.
NV_ERR_INVALID_ARGUMENT - The size of the data is 0 bytes.
NV_ERR_INVALID_DATA - Verification of the authentication tag fails.
*/
NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
NvU32 bufferSize,
NvU8 const *inputBuffer,
UvmCslIv const *decryptIv,
NvU32 keyRotationId,
NvU8 *outputBuffer,
NvU8 const *addAuthData,
NvU32 addAuthDataSize,
NvU8 const *authTagBuffer);
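And the matching decrypt shape, using the context's internal counter (NULL IV) and the current key via NV_U32_MAX, per the keyRotationId description above; omitting additional authenticated data is an illustrative simplification.

static NV_STATUS cslDecryptExample(UvmCslContext *ctx,
                                   NvU32 size,
                                   NvU8 const *ciphertext,
                                   NvU8 *plaintext,
                                   NvU8 const *authTag)
{
    return nvUvmInterfaceCslDecrypt(ctx, size, ciphertext,
                                    NULL /* context's internal counter */,
                                    NV_U32_MAX /* current key */,
                                    plaintext,
                                    NULL /* no additional auth data */, 0,
                                    authTag);
}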
/*******************************************************************************
nvUvmInterfaceCslSign

Generates an authentication tag for secure work launch.

Auth and input buffers must not overlap. If they do then calling this function produces
undefined behavior.

Locking: This function does not acquire an API or GPU lock.
The caller must guarantee that no CSL function, including this one,
is invoked concurrently with the same CSL context.
Memory : This function does not dynamically allocate memory.

Arguments:
uvmCslContext[IN/OUT] - The CSL context associated with a channel.
bufferSize[IN] - Size of the input buffer in units of bytes.
Value can range from 1 byte to (2^32) - 1 bytes.
inputBuffer[IN] - Address of plaintext input buffer.
authTagBuffer[OUT] - Address of authentication tag buffer.
Its size is UVM_CSL_SIGN_AUTH_TAG_SIZE_BYTES.

Error codes:
NV_ERR_INSUFFICIENT_RESOURCES - The signing operation would cause a counter overflow to occur.
NV_ERR_INVALID_ARGUMENT - The CSL context is not associated with a channel.
- The size of the data is 0 bytes.
*/
NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *authTagBuffer);

/*******************************************************************************
nvUvmInterfaceCslQueryMessagePool

Returns the number of messages that can be encrypted before the message counter will overflow.

Locking: This function does not acquire an API or GPU lock.
Memory : This function does not dynamically allocate memory.
The caller must guarantee that no CSL function, including this one,
is invoked concurrently with the same CSL context.

Arguments:
uvmCslContext[IN/OUT] - The CSL context.
operation[IN] - Either UVM_CSL_OPERATION_ENCRYPT or UVM_CSL_OPERATION_DECRYPT.
messageNum[OUT] - Number of messages left before overflow.

Error codes:
NV_ERR_INVALID_ARGUMENT - The value of the operation parameter is illegal.
*/
NV_STATUS nvUvmInterfaceCslQueryMessagePool(UvmCslContext *uvmCslContext,
UvmCslOperation operation,
NvU64 *messageNum);

/*******************************************************************************
nvUvmInterfaceCslIncrementIv

Increments the message counter by the specified amount.

If iv is non-NULL then the incremented value is returned.
If operation is UVM_CSL_OPERATION_ENCRYPT then the returned IV's "freshness" bit is set and
can be used in nvUvmInterfaceCslEncrypt. If operation is UVM_CSL_OPERATION_DECRYPT then
the returned IV can be used in nvUvmInterfaceCslDecrypt.

Locking: This function does not acquire an API or GPU lock.
The caller must guarantee that no CSL function, including this one,
is invoked concurrently with the same CSL context.
Memory : This function does not dynamically allocate memory.

Arguments:
uvmCslContext[IN/OUT] - The CSL context.
operation[IN] - Either
- UVM_CSL_OPERATION_ENCRYPT
- UVM_CSL_OPERATION_DECRYPT
increment[IN] - The amount by which the IV is incremented. Can be 0.
iv[OUT] - If non-NULL, a buffer to store the incremented IV.

Error codes:
NV_ERR_INVALID_ARGUMENT - The value of the operation parameter is illegal.
NV_ERR_INSUFFICIENT_RESOURCES - Incrementing the message counter would result
in an overflow.
*/
NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
UvmCslOperation operation,
NvU64 increment,
UvmCslIv *iv);
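A short sketch of reserving an encryption IV ahead of time, as the freshness rules above allow; incrementing by one is the simplest case.

static NV_STATUS reserveEncryptIvExample(UvmCslContext *ctx, UvmCslIv *iv)
{
    // Advance the encrypt counter by one and capture the resulting IV. Its
    // freshness bit is set, so it can later be handed to
    // nvUvmInterfaceCslEncrypt exactly once.
    return nvUvmInterfaceCslIncrementIv(ctx, UVM_CSL_OPERATION_ENCRYPT, 1, iv);
}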
/*******************************************************************************
nvUvmInterfaceCslLogEncryption

Checks and logs information about encryptions associated with the given
CSL context.

For contexts associated with channels, this function does not modify elements of
the UvmCslContext, and must be called for every CPU/GPU encryption.

For the context associated with fault buffers, bufferSize can encompass multiple
encryption invocations, and the UvmCslContext will be updated following a key
rotation event.

In either case the IV remains unmodified after this function is called.

Locking: This function does not acquire an API or GPU lock.
Memory : This function does not dynamically allocate memory.
The caller must guarantee that no CSL function, including this one,
is invoked concurrently with the same CSL context.

Arguments:
uvmCslContext[IN/OUT] - The CSL context.
operation[IN] - If the CSL context is associated with a fault
buffer, this argument is ignored. If it is
associated with a channel, it must be either
- UVM_CSL_OPERATION_ENCRYPT
- UVM_CSL_OPERATION_DECRYPT
bufferSize[IN] - The size of the buffer(s) encrypted by the
external entity in units of bytes.

Error codes:
NV_ERR_INSUFFICIENT_RESOURCES - The encryption would cause a counter
to overflow.
*/
NV_STATUS nvUvmInterfaceCslLogEncryption(UvmCslContext *uvmCslContext,
UvmCslOperation operation,
NvU32 bufferSize);

#endif // _NV_UVM_INTERFACE_H_

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -92,6 +92,7 @@ typedef unsigned long long UvmGpuPointer;
typedef struct uvmGpuSession_tag *uvmGpuSessionHandle; // gpuSessionHandle
typedef struct uvmGpuDevice_tag *uvmGpuDeviceHandle; // gpuDeviceHandle
typedef struct uvmGpuAddressSpace_tag *uvmGpuAddressSpaceHandle; // gpuAddressSpaceHandle
typedef struct uvmGpuTsg_tag *uvmGpuTsgHandle; // gpuTsgHandle
typedef struct uvmGpuChannel_tag *uvmGpuChannelHandle; // gpuChannelHandle
typedef struct uvmGpuCopyEngine_tag *uvmGpuCopyEngineHandle; // gpuObjectHandle

@@ -103,6 +104,10 @@ typedef struct UvmGpuMemoryInfo_tag
// Out: Set to TRUE, if the allocation is in sysmem.
NvBool sysmem;

// Out: Set to TRUE, if this allocation is treated as EGM.
// sysmem is also TRUE when egm is TRUE.
NvBool egm;

// Out: Set to TRUE, if the allocation is a constructed
// under a Device or Subdevice.
// All permutations of sysmem and deviceDescendant are valid.
@@ -110,7 +115,7 @@ typedef struct UvmGpuMemoryInfo_tag
NvBool deviceDescendant;

// Out: Page size associated with the phys alloc.
NvU32 pageSize;
NvU64 pageSize;

// Out: Set to TRUE, if the allocation is contiguous.
NvBool contig;
@@ -124,6 +129,10 @@ typedef struct UvmGpuMemoryInfo_tag

// Out: Uuid of the GPU to which the allocation belongs.
// This is only valid if deviceDescendant is NV_TRUE.
// When egm is NV_TRUE, this is also the UUID of the GPU
// for which EGM is local.
// If the GPU has SMC enabled, the UUID is the GI UUID.
// Otherwise, it is the UUID for the physical GPU.
// Note: If the allocation is owned by a device in
// an SLI group and the allocation is broadcast
// across the SLI group, this UUID will be any one
@@ -258,6 +267,7 @@ typedef struct UvmGpuChannelInfo_tag

// The errorNotifier is filled out when the channel hits an RC error.
NvNotification *errorNotifier;
NvNotification *keyRotationNotifier;

NvU32 hwRunlistId;
NvU32 hwChannelId;
@@ -280,6 +290,16 @@ typedef struct UvmGpuChannelInfo_tag
// to kick off the new work.
//
volatile NvU32 *pWorkSubmissionToken;

// GPU VAs of both GPFIFO and GPPUT are needed in Confidential Computing
// so a channel can be controlled via another channel (SEC2 or WLC/LCIC)
NvU64 gpFifoGpuVa;
NvU64 gpPutGpuVa;
NvU64 gpGetGpuVa;
// GPU VA of work submission offset is needed in Confidential Computing
// so CE channels can ring doorbell of other channels as required for
// WLC/LCIC work submission
NvU64 workSubmissionOffsetGpuVa;
} UvmGpuChannelInfo;

typedef enum
@@ -292,6 +312,17 @@ typedef enum
UVM_BUFFER_LOCATION_VID = 2,
} UVM_BUFFER_LOCATION;

typedef struct UvmGpuTsgAllocParams_tag
{
// Interpreted as UVM_GPU_CHANNEL_ENGINE_TYPE
NvU32 engineType;

// Index of the engine the TSG is bound to.
// Ignored if engineType is anything other than
// UVM_GPU_CHANNEL_ENGINE_TYPE_CE.
NvU32 engineIndex;
} UvmGpuTsgAllocParams;

typedef struct UvmGpuChannelAllocParams_tag
{
NvU32 numGpFifoEntries;
@@ -299,13 +330,6 @@ typedef struct UvmGpuChannelAllocParams_tag
// The next two fields store UVM_BUFFER_LOCATION values
NvU32 gpFifoLoc;
NvU32 gpPutLoc;

// Index of the engine the channel will be bound to
// ignored if engineType is anything other than UVM_GPU_CHANNEL_ENGINE_TYPE_CE
NvU32 engineIndex;

// interpreted as UVM_GPU_CHANNEL_ENGINE_TYPE
NvU32 engineType;
} UvmGpuChannelAllocParams;

typedef struct UvmGpuPagingChannelAllocParams_tag
@@ -317,7 +341,7 @@ typedef struct UvmGpuPagingChannelAllocParams_tag

// The max number of Copy Engines supported by a GPU.
// The gpu ops build has a static assert that this is the correct number.
#define UVM_COPY_ENGINE_COUNT_MAX 10
#define UVM_COPY_ENGINE_COUNT_MAX 64
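The comment above mentions a static assert in the gpu ops build; a sketch of such a guard (the exact macro used by that build is not shown here, C11 _Static_assert is an assumption):

_Static_assert(UVM_COPY_ENGINE_COUNT_MAX == 64,
               "UVM and RM disagree on the maximum copy engine count");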
typedef struct
{
@@ -376,40 +400,16 @@ typedef enum

typedef struct UvmGpuCaps_tag
{
NvU32 sysmemLink; // UVM_LINK_TYPE
NvU32 sysmemLinkRateMBps; // See UvmGpuP2PCapsParams::totalLinkLineRateMBps
// If numaEnabled is NV_TRUE, then the system address of allocated GPU
// memory can be converted to struct pages. See
// UvmGpuInfo::systemMemoryWindowStart.
NvBool numaEnabled;
NvU32 numaNodeId;

// On ATS systems, GPUs connected to different CPU sockets can have peer
// traffic. They are called indirect peers. However, indirect peers are
// mapped using sysmem aperture. In order to disambiguate the location of a
// specific memory address, each GPU maps its memory to a different window
// in the System Physical Address (SPA) space. The following fields contain
// the base + size of such window for the GPU. systemMemoryWindowSize
// different than 0 indicates that the window is valid.
//
// - If the window is valid, then we can map GPU memory to the CPU as
// cache-coherent by adding the GPU address to the window start.
// - If numaEnabled is NV_TRUE, then we can also convert the system
// addresses of allocated GPU memory to struct pages.
//
// TODO: Bug 1986868: fix window start computation for SIMICS
NvU64 systemMemoryWindowStart;
NvU64 systemMemoryWindowSize;

// This tells if the GPU is connected to NVSwitch. On systems with NVSwitch
// all GPUs are connected to it. If connectedToSwitch is NV_TRUE,
// nvswitchMemoryWindowStart tells the base address for the GPU in the
// NVSwitch address space. It is used when creating PTEs of memory mappings
// to NVSwitch peers.
NvBool connectedToSwitch;
NvU64 nvswitchMemoryWindowStart;
} UvmGpuCaps;

typedef struct UvmGpuAddressSpaceInfo_tag
{
NvU32 bigPageSize;
NvU64 bigPageSize;

NvBool atsEnabled;

@@ -430,12 +430,14 @@ typedef struct UvmGpuAddressSpaceInfo_tag
typedef struct UvmGpuAllocInfo_tag
{
NvU64 gpuPhysOffset; // Returns gpuPhysOffset if contiguous requested
NvU32 pageSize; // default is RM big page size - 64K or 128K; else use 4K or 2M
NvU64 pageSize; // default is RM big page size - 64K or 128K; else use 4K or 2M
NvU64 alignment; // Virtual alignment
NvBool bContiguousPhysAlloc; // Flag to request contiguous physical allocation
NvBool bMemGrowsDown; // Causes RM to reserve physical heap from top of FB
NvBool bPersistentVidmem; // Causes RM to allocate persistent video memory
NvHandle hPhysHandle; // Handle for phys allocation either provided or retrieved
NvBool bUnprotected; // Allocation to be made in unprotected memory whenever
// SEV or GPU CC modes are enabled. Ignored otherwise
} UvmGpuAllocInfo;

typedef enum
@@ -516,6 +518,13 @@ typedef struct UvmGpuExternalMappingInfo_tag
// In: Size of the buffer to store PTEs (in bytes).
NvU64 pteBufferSize;

// In: Page size for mapping
// If this field is passed as 0, the page size
// of the allocation is used for mapping.
// nvUvmInterfaceGetExternalAllocPtes must pass
// this field as zero.
NvU64 mappingPageSize;

// In: Pointer to a buffer to store PTEs.
// Out: The interface will fill the buffer with PTEs
NvU64 *pteBuffer;
@@ -538,6 +547,10 @@ typedef struct UvmGpuP2PCapsParams_tag
// the GPUs are direct peers.
NvU32 peerIds[2];

// Out: peerId[i] contains gpu[i]'s EGM peer id of gpu[1 - i]. Only defined
// if the GPUs are direct peers and EGM enabled in the system.
NvU32 egmPeerIds[2];

// Out: UVM_LINK_TYPE
NvU32 p2pLink;

@@ -566,8 +579,11 @@ typedef struct UvmPlatformInfo_tag
// Out: ATS (Address Translation Services) is supported
NvBool atsSupported;

// Out: AMD SEV (Secure Encrypted Virtualization) is enabled
NvBool sevEnabled;
// Out: True if HW trusted execution, such as AMD's SEV-SNP or Intel's TDX,
// is enabled in the VM, indicating that Confidential Computing must be
// also enabled in the GPU(s); these two security features are either both
// enabled, or both disabled.
NvBool confComputingEnabled;
} UvmPlatformInfo;

typedef struct UvmGpuClientInfo_tag
@@ -577,6 +593,20 @@ typedef struct UvmGpuClientInfo_tag
NvHandle hSmcPartRef;
} UvmGpuClientInfo;

typedef enum
{
UVM_GPU_CONF_COMPUTE_MODE_NONE = 0,
UVM_GPU_CONF_COMPUTE_MODE_HCC = 2
} UvmGpuConfComputeMode;

typedef struct UvmGpuConfComputeCaps_tag
{
// Out: GPU's confidential compute mode
UvmGpuConfComputeMode mode;
// Is key rotation enabled for UVM keys
NvBool bKeyRotationEnabled;
} UvmGpuConfComputeCaps;

#define UVM_GPU_NAME_LENGTH 0x40

typedef struct UvmGpuInfo_tag
@@ -584,7 +614,8 @@ typedef struct UvmGpuInfo_tag
// Printable gpu name
char name[UVM_GPU_NAME_LENGTH];

// Uuid of this gpu
// Uuid of the physical GPU or GI UUID if nvUvmInterfaceGetGpuInfo()
// requested information for a valid SMC partition.
NvProcessorUuid uuid;

// Gpu architecture; NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_*
@@ -641,6 +672,41 @@ typedef struct UvmGpuInfo_tag

UvmGpuClientInfo smcUserClientInfo;

// Confidential Compute capabilities of this GPU
UvmGpuConfComputeCaps gpuConfComputeCaps;

// UVM_LINK_TYPE
NvU32 sysmemLink;

// See UvmGpuP2PCapsParams::totalLinkLineRateMBps
NvU32 sysmemLinkRateMBps;

// On coherent systems each GPU maps its memory to a window in the System
// Physical Address (SPA) space. The following fields describe that window.
//
// systemMemoryWindowSize > 0 indicates that the window is valid, meaning
// that GPU memory can be mapped by the CPU as cache-coherent by adding the
// GPU address to the window start.
NvU64 systemMemoryWindowStart;
NvU64 systemMemoryWindowSize;

// This tells if the GPU is connected to NVSwitch. On systems with NVSwitch
// all GPUs are connected to it. If connectedToSwitch is NV_TRUE,
// nvswitchMemoryWindowStart tells the base address for the GPU in the
// NVSwitch address space. It is used when creating PTEs of memory mappings
// to NVSwitch peers.
NvBool connectedToSwitch;
NvU64 nvswitchMemoryWindowStart;

// local EGM properties
// NV_TRUE if EGM is enabled
NvBool egmEnabled;

// Peer ID to reach local EGM when EGM is enabled
NvU8 egmPeerId;

// EGM base address to offset in the GMMU PTE entry for EGM mappings
NvU64 egmBaseAddr;
} UvmGpuInfo;

typedef struct UvmGpuFbInfo_tag
@@ -649,9 +715,10 @@ typedef struct UvmGpuFbInfo_tag
// RM regions that are not registered with PMA either.
NvU64 maxAllocatableAddress;

NvU32 heapSize; // RAM in KB available for user allocations
NvU32 reservedHeapSize; // RAM in KB reserved for internal RM allocation
NvBool bZeroFb; // Zero FB mode enabled.
NvU32 heapSize; // RAM in KB available for user allocations
NvU32 reservedHeapSize; // RAM in KB reserved for internal RM allocation
NvBool bZeroFb; // Zero FB mode enabled.
NvU64 maxVidmemPageSize; // Largest GPU page size to access vidmem.
} UvmGpuFbInfo;

typedef struct UvmGpuEccInfo_tag
@@ -683,6 +750,9 @@ typedef struct UvmPmaStatistics_tag
volatile NvU64 numPages2m; // PMA-wide 2MB pages count across all regions
volatile NvU64 numFreePages64k; // PMA-wide free 64KB page count across all regions
volatile NvU64 numFreePages2m; // PMA-wide free 2MB pages count across all regions
volatile NvU64 numPages2mProtected; // PMA-wide 2MB pages count in protected memory
volatile NvU64 numFreePages64kProtected; // PMA-wide free 64KB page count in protected memory
volatile NvU64 numFreePages2mProtected; // PMA-wide free 2MB pages count in protected memory
} UvmPmaStatistics;

/*******************************************************************************
@@ -726,14 +796,14 @@ typedef NV_STATUS (*uvmEventResume_t) (void);
/*******************************************************************************
uvmEventStartDevice
This function will be called by the GPU driver once it has finished its
initialization to tell the UVM driver that this GPU has come up.
initialization to tell the UVM driver that this physical GPU has come up.
*/
typedef NV_STATUS (*uvmEventStartDevice_t) (const NvProcessorUuid *pGpuUuidStruct);

/*******************************************************************************
uvmEventStopDevice
This function will be called by the GPU driver to let UVM know that a GPU
is going down.
This function will be called by the GPU driver to let UVM know that a
physical GPU is going down.
*/
typedef NV_STATUS (*uvmEventStopDevice_t) (const NvProcessorUuid *pGpuUuidStruct);

@@ -764,7 +834,7 @@ typedef NV_STATUS (*uvmEventServiceInterrupt_t) (void *pDeviceObject,
/*******************************************************************************
uvmEventIsrTopHalf_t
This function will be called by the GPU driver to let UVM know
that an interrupt has occurred.
that an interrupt has occurred on the given physical GPU.

Returns:
NV_OK if the UVM driver handled the interrupt
@@ -790,24 +860,87 @@ struct UvmOpsUvmEvents
#endif
};

#define UVM_CSL_SIGN_AUTH_TAG_SIZE_BYTES 32
#define UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES 16

typedef union UvmFaultMetadataPacket_tag
{
struct {
NvU8 authTag[UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES];
NvBool valid;
};
// padding to 32 bytes
NvU8 _padding[32];
} UvmFaultMetadataPacket;
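A hypothetical reader of the metadata packets defined above might look like the following; the indexing scheme is an assumption for illustration.

// Return the 16-byte auth tag of the index-th shadow-buffer entry, or NULL
// if that entry has not been marked valid.
static const NvU8 *faultAuthTagExample(UvmFaultMetadataPacket *metadata,
                                       NvU32 index)
{
    return metadata[index].valid ? metadata[index].authTag : NULL;
}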
// This struct shall not be accessed nor modified directly by UVM as it is
// entirely managed by the RM layer
typedef struct UvmCslContext_tag
{
struct ccslContext_t *ctx;
void *nvidia_stack;
} UvmCslContext;

typedef struct UvmGpuFaultInfo_tag
{
struct
{
// Register mappings obtained from RM
// Fault buffer GET register mapping.
//
// When Confidential Computing is enabled, GET refers to the shadow
// buffer (see bufferAddress below), and not to the actual HW buffer.
// In this setup, writes of GET (by UVM) do not result in re-evaluation
// of any interrupt condition.
volatile NvU32* pFaultBufferGet;

// Fault buffer PUT register mapping.
//
// When Confidential Computing is enabled, PUT refers to the shadow
// buffer (see bufferAddress below), and not to the actual HW buffer.
// In this setup, writes of PUT (by GSP-RM) do not result in
// re-evaluation of any interrupt condition.
volatile NvU32* pFaultBufferPut;
// Note: this variable is deprecated since buffer overflow is not a separate
// register from future chips.

// Note: this variable is deprecated since buffer overflow is not a
// separate register from future chips.
volatile NvU32* pFaultBufferInfo;

// Register mapping used to clear a replayable fault interrupt in
// Turing+ GPUs.
volatile NvU32* pPmcIntr;

// Register mapping used to enable replayable fault interrupts.
volatile NvU32* pPmcIntrEnSet;

// Register mapping used to disable replayable fault interrupts.
volatile NvU32* pPmcIntrEnClear;

// Register used to enable, or disable, faults on prefetches.
volatile NvU32* pPrefetchCtrl;

// Replayable fault interrupt mask identifier.
NvU32 replayableFaultMask;
// fault buffer cpu mapping and size
void* bufferAddress;

// Fault buffer CPU mapping
void* bufferAddress;
//
// When Confidential Computing is disabled, the mapping points to the
// actual HW fault buffer.
//
// When Confidential Computing is enabled, the mapping points to a
// copy of the HW fault buffer. This "shadow buffer" is maintained
// by GSP-RM.

// Size, in bytes, of the fault buffer pointed to by bufferAddress.
NvU32 bufferSize;
// Mapping pointing to the start of the fault buffer metadata containing
// a 16-byte authentication tag and a valid byte. Always NULL when
// Confidential Computing is disabled.
UvmFaultMetadataPacket *bufferMetadata;

// CSL context used for performing decryption of replayable faults when
// Confidential Computing is enabled.
UvmCslContext cslCtx;
} replayable;
struct
{
@@ -826,8 +959,20 @@ typedef struct UvmGpuFaultInfo_tag

// Preallocated stack for functions called from the UVM isr bottom half
void *isr_bh_sp;

// Used only when Hopper Confidential Compute is enabled
// Register mappings obtained from RM
volatile NvU32* pFaultBufferPut;

// Used only when Hopper Confidential Compute is enabled
// Cached get index of the non-replayable shadow buffer
NvU32 shadowBufferGet;

// See replayable.bufferMetadata
UvmFaultMetadataPacket *shadowBufferMetadata;
} nonReplayable;
NvHandle faultBufferHandle;
struct Device *pDevice;
} UvmGpuFaultInfo;

struct Device;
@@ -863,12 +1008,6 @@ typedef struct UvmGpuAccessCntrInfo_tag
void* bufferAddress;
NvU32 bufferSize;
NvHandle accessCntrBufferHandle;

// The Notification address in the access counter notification msg does not
// contain the correct upper bits 63-47 for GPA-based notifications. RM
// provides us with the correct offset to be added.
// See Bug 1803015
NvU64 baseDmaSysmemAddr;
} UvmGpuAccessCntrInfo;

typedef enum
@@ -911,6 +1050,7 @@ typedef enum UvmPmaGpuMemoryType_tag
} UVM_PMA_GPU_MEMORY_TYPE;

typedef UvmGpuChannelInfo gpuChannelInfo;
typedef UvmGpuTsgAllocParams gpuTsgAllocParams;
typedef UvmGpuChannelAllocParams gpuChannelAllocParams;
typedef UvmGpuCaps gpuCaps;
typedef UvmGpuCopyEngineCaps gpuCeCaps;
@@ -935,4 +1075,33 @@ typedef UvmGpuPagingChannelInfo gpuPagingChannelInfo;
typedef UvmGpuPagingChannelAllocParams gpuPagingChannelAllocParams;
typedef UvmPmaAllocationOptions gpuPmaAllocationOptions;

typedef struct UvmCslIv
{
NvU8 iv[12];
NvU8 fresh;
} UvmCslIv;

typedef enum UvmCslOperation
{
UVM_CSL_OPERATION_ENCRYPT,
UVM_CSL_OPERATION_DECRYPT
} UvmCslOperation;

typedef enum UVM_KEY_ROTATION_STATUS {
// Key rotation complete/not in progress
UVM_KEY_ROTATION_STATUS_IDLE = 0,
// RM is waiting for clients to report their channels are idle for key rotation
UVM_KEY_ROTATION_STATUS_PENDING = 1,
// Key rotation is in progress
UVM_KEY_ROTATION_STATUS_IN_PROGRESS = 2,
// Key rotation timeout failure, RM will RC non-idle channels.
// UVM should never see this status value.
UVM_KEY_ROTATION_STATUS_FAILED_TIMEOUT = 3,
// Key rotation failed because upper threshold was crossed, RM will RC non-idle channels
UVM_KEY_ROTATION_STATUS_FAILED_THRESHOLD = 4,
// Internal RM failure while rotating keys for a certain channel, RM will RC the channel.
UVM_KEY_ROTATION_STATUS_FAILED_ROTATION = 5,
UVM_KEY_ROTATION_STATUS_MAX_COUNT = 6,
} UVM_KEY_ROTATION_STATUS;
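A small classification sketch over the enum above; per its comments, UVM is never expected to observe FAILED_TIMEOUT.

static NvBool keyRotationInFlightExample(UVM_KEY_ROTATION_STATUS status)
{
    switch (status)
    {
        case UVM_KEY_ROTATION_STATUS_PENDING:
        case UVM_KEY_ROTATION_STATUS_IN_PROGRESS:
            return NV_TRUE;
        default:
            return NV_FALSE;
    }
}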
#endif // _NV_UVM_TYPES_H_

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -45,6 +45,11 @@

#define NVKMS_DEVICE_ID_TEGRA 0x0000ffff

#define NVKMS_MAX_SUPERFRAME_VIEWS 4

#define NVKMS_LOG2_LUT_ARRAY_SIZE 10
#define NVKMS_LUT_ARRAY_SIZE (1 << NVKMS_LOG2_LUT_ARRAY_SIZE)

typedef NvU32 NvKmsDeviceHandle;
typedef NvU32 NvKmsDispHandle;
typedef NvU32 NvKmsConnectorHandle;
@@ -53,6 +58,7 @@ typedef NvU32 NvKmsFrameLockHandle;
typedef NvU32 NvKmsDeferredRequestFifoHandle;
typedef NvU32 NvKmsSwapGroupHandle;
typedef NvU32 NvKmsVblankSyncObjectHandle;
typedef NvU32 NvKmsVblankSemControlHandle;

struct NvKmsSize {
NvU16 width;
@@ -179,6 +185,14 @@ enum NvKmsEventType {
NVKMS_EVENT_TYPE_FLIP_OCCURRED,
};

enum NvKmsFlipResult {
NV_KMS_FLIP_RESULT_SUCCESS = 0, /* Success */
NV_KMS_FLIP_RESULT_INVALID_PARAMS, /* Parameter validation failed */
NV_KMS_FLIP_RESULT_IN_PROGRESS, /* Flip would fail because an outstanding
flip containing changes that cannot be
queued is in progress */
};

typedef enum {
NV_EVO_SCALER_1TAP = 0,
NV_EVO_SCALER_2TAPS = 1,
@@ -221,6 +235,16 @@ struct NvKmsUsageBounds {
} layer[NVKMS_MAX_LAYERS_PER_HEAD];
};

/*!
 * Per-component arrays of NvU16s describing the LUT; used for both the input
 * LUT and output LUT.
 */
struct NvKmsLutRamps {
NvU16 red[NVKMS_LUT_ARRAY_SIZE]; /*! in */
NvU16 green[NVKMS_LUT_ARRAY_SIZE]; /*! in */
NvU16 blue[NVKMS_LUT_ARRAY_SIZE]; /*! in */
};
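To illustrate the NVKMS_LUT_ARRAY_SIZE layout above, a hypothetical identity ramp filler; the linear spread across the 16-bit range is an assumption about how a client might populate it.

static void fillIdentityLutExample(struct NvKmsLutRamps *ramps)
{
    int i;
    // Spread 0..65535 evenly across the 1024 entries of each component.
    for (i = 0; i < NVKMS_LUT_ARRAY_SIZE; i++) {
        NvU16 v = (NvU16)((i * 65535) / (NVKMS_LUT_ARRAY_SIZE - 1));
        ramps->red[i]   = v;
        ramps->green[i] = v;
        ramps->blue[i]  = v;
    }
}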
/*
 * A 3x4 row-major colorspace conversion matrix.
 *
@@ -531,6 +555,18 @@ typedef struct {
NvBool noncoherent;
} NvKmsDispIOCoherencyModes;

enum NvKmsInputColorRange {
/*
 * If DEFAULT is provided, driver will assume full range for RGB formats
 * and limited range for YUV formats.
 */
NVKMS_INPUT_COLORRANGE_DEFAULT = 0,

NVKMS_INPUT_COLORRANGE_LIMITED = 1,

NVKMS_INPUT_COLORRANGE_FULL = 2,
};

enum NvKmsInputColorSpace {
/* Unknown colorspace; no de-gamma will be applied */
NVKMS_INPUT_COLORSPACE_NONE = 0,
@@ -542,6 +578,12 @@ enum NvKmsInputColorSpace {
NVKMS_INPUT_COLORSPACE_BT2100_PQ = 2,
};

enum NvKmsOutputColorimetry {
NVKMS_OUTPUT_COLORIMETRY_DEFAULT = 0,

NVKMS_OUTPUT_COLORIMETRY_BT2100 = 1,
};

enum NvKmsOutputTf {
/*
 * NVKMS itself won't apply any OETF (clients are still
@@ -552,6 +594,17 @@ enum NvKmsOutputTf {
NVKMS_OUTPUT_TF_PQ = 2,
};

/*!
 * EOTF Data Byte 1 as per CTA-861-G spec.
 * This is expected to match exactly with the spec.
 */
enum NvKmsInfoFrameEOTF {
NVKMS_INFOFRAME_EOTF_SDR_GAMMA = 0,
NVKMS_INFOFRAME_EOTF_HDR_GAMMA = 1,
NVKMS_INFOFRAME_EOTF_ST2084 = 2,
NVKMS_INFOFRAME_EOTF_HLG = 3,
};

/*!
 * HDR Static Metadata Type1 Descriptor as per CEA-861.3 spec.
 * This is expected to match exactly with the spec.
@@ -605,4 +658,29 @@ struct NvKmsHDRStaticMetadata {
NvU16 maxFALL;
};

/*!
 * A superframe is made of two or more video streams that are combined in
 * a specific way. A DP serializer (an external device connected to a Tegra
 * ARM SOC over DP or HDMI) can receive a video stream comprising multiple
 * videos combined into a single frame and then split it into multiple
 * video streams. The following structure describes the number of views
 * and dimensions of each view inside a superframe.
 */
struct NvKmsSuperframeInfo {
NvU8 numViews;
struct {
/* x offset inside superframe at which this view starts */
NvU16 x;

/* y offset inside superframe at which this view starts */
NvU16 y;

/* Horizontal active width in pixels for this view */
NvU16 width;

/* Vertical active height in lines for this view */
NvU16 height;
} view[NVKMS_MAX_SUPERFRAME_VIEWS];
};
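A hypothetical consumer of the view table above; summing widths assumes the common side-by-side packing and is purely illustrative.

static NvU32 superframeTotalWidthExample(const struct NvKmsSuperframeInfo *info)
{
    NvU32 total = 0;
    NvU8 i;

    for (i = 0; i < info->numViews; i++)
        total += info->view[i].width;
    return total;
}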
#endif /* NVKMS_API_TYPES_H */

@@ -49,6 +49,8 @@ struct NvKmsKapiDevice;
struct NvKmsKapiMemory;
struct NvKmsKapiSurface;
struct NvKmsKapiChannelEvent;
struct NvKmsKapiSemaphoreSurface;
struct NvKmsKapiSemaphoreSurfaceCallback;

typedef NvU32 NvKmsKapiConnector;
typedef NvU32 NvKmsKapiDisplay;
@@ -67,6 +69,14 @@ typedef NvU32 NvKmsKapiDisplay;
 */
typedef void NvKmsChannelEventProc(void *dataPtr, NvU32 dataU32);

/*
 * Note: Same as above, this function must not call back into NVKMS-KAPI, nor
 * directly into RM. Doing so could cause deadlocks given the notification
 * function will most likely be called from within RM's interrupt handler
 * callchain.
 */
typedef void NvKmsSemaphoreSurfaceCallbackProc(void *pData);

/** @} */

/**
@@ -126,6 +136,11 @@ struct NvKmsKapiDeviceResourcesInfo {
NvU32 validCursorCompositionModes;
NvU64 supportedCursorSurfaceMemoryFormats;

struct {
NvU64 maxSubmittedOffset;
NvU64 stride;
} semsurf;

struct {
NvU16 validRRTransforms;
NvU32 validCompositionModes;
@@ -165,8 +180,6 @@ struct NvKmsKapiConnectorInfo {

NvU32 physicalIndex;

NvU32 headMask;

NvKmsConnectorSignalFormat signalFormat;
NvKmsConnectorType type;

@@ -194,6 +207,7 @@ struct NvKmsKapiStaticDisplayInfo {
NvU32 numPossibleClones;
NvKmsKapiDisplay possibleCloneHandles[NVKMS_KAPI_MAX_CLONE_DISPLAYS];

NvU32 headMask;
};

struct NvKmsKapiSyncpt {
@@ -219,8 +233,10 @@ struct NvKmsKapiLayerConfig {
struct NvKmsRRParams rrParams;
struct NvKmsKapiSyncpt syncptParams;

struct NvKmsHDRStaticMetadata hdrMetadata;
NvBool hdrMetadataSpecified;
struct {
struct NvKmsHDRStaticMetadata val;
NvBool enabled;
} hdrMetadata;

enum NvKmsOutputTf tf;

@@ -234,16 +250,21 @@ struct NvKmsKapiLayerConfig {
NvU16 dstWidth, dstHeight;

enum NvKmsInputColorSpace inputColorSpace;
struct NvKmsCscMatrix csc;
NvBool cscUseMain;
};

struct NvKmsKapiLayerRequestedConfig {
struct NvKmsKapiLayerConfig config;
struct {
NvBool surfaceChanged : 1;
NvBool srcXYChanged : 1;
NvBool srcWHChanged : 1;
NvBool dstXYChanged : 1;
NvBool dstWHChanged : 1;
NvBool surfaceChanged : 1;
NvBool srcXYChanged : 1;
NvBool srcWHChanged : 1;
NvBool dstXYChanged : 1;
NvBool dstWHChanged : 1;
NvBool cscChanged : 1;
NvBool tfChanged : 1;
NvBool hdrMetadataChanged : 1;
} flags;
};

@@ -287,14 +308,41 @@ struct NvKmsKapiHeadModeSetConfig {
struct NvKmsKapiDisplayMode mode;

NvBool vrrEnabled;

struct {
NvBool enabled;
enum NvKmsInfoFrameEOTF eotf;
struct NvKmsHDRStaticMetadata staticMetadata;
} hdrInfoFrame;

enum NvKmsOutputColorimetry colorimetry;

struct {
struct {
NvBool specified;
NvU32 depth;
NvU32 start;
NvU32 end;
struct NvKmsLutRamps *pRamps;
} input;

struct {
NvBool specified;
NvBool enabled;
struct NvKmsLutRamps *pRamps;
} output;
} lut;
};

struct NvKmsKapiHeadRequestedConfig {
struct NvKmsKapiHeadModeSetConfig modeSetConfig;
struct {
NvBool activeChanged : 1;
NvBool displaysChanged : 1;
NvBool modeChanged : 1;
NvBool activeChanged : 1;
NvBool displaysChanged : 1;
NvBool modeChanged : 1;
NvBool hdrInfoFrameChanged : 1;
NvBool colorimetryChanged : 1;
NvBool lutChanged : 1;
} flags;

struct NvKmsKapiCursorRequestedConfig cursorRequestedConfig;
@@ -319,6 +367,7 @@ struct NvKmsKapiHeadReplyConfig {
};

struct NvKmsKapiModeSetReplyConfig {
enum NvKmsFlipResult flipResult;
struct NvKmsKapiHeadReplyConfig
headReplyConfig[NVKMS_KAPI_MAX_HEADS];
};
@@ -435,6 +484,14 @@ enum NvKmsKapiAllocationType {
NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN = 2,
};

typedef enum NvKmsKapiRegisterWaiterResultRec {
NVKMS_KAPI_REG_WAITER_FAILED,
NVKMS_KAPI_REG_WAITER_SUCCESS,
NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED,
} NvKmsKapiRegisterWaiterResult;

typedef void NvKmsKapiSuspendResumeCallbackFunc(NvBool suspend);

struct NvKmsKapiFunctionsTable {

/*!
@@ -520,14 +577,51 @@ struct NvKmsKapiFunctionsTable {
);

/*!
 * Revoke modeset permissions previously granted. This currently applies for all
 * previous grant requests for this device.
 * Revoke modeset permissions previously granted. Only one (dispIndex,
 * head, display) is currently supported.
 *
 * \param [in] device A device returned by allocateDevice().
 * \param [in] device A device returned by allocateDevice().
 *
 * \param [in] head The head of the display.
 *
 * \param [in] display The display to revoke.
 *
 * \return NV_TRUE on success, NV_FALSE on failure.
 */
NvBool (*revokePermissions)(struct NvKmsKapiDevice *device);
NvBool (*revokePermissions)
(
struct NvKmsKapiDevice *device,
NvU32 head,
NvKmsKapiDisplay display
);
/*!
|
||||
* Grant modeset sub-owner permissions to fd. This is used by clients to
|
||||
* convert drm 'master' permissions into nvkms sub-owner permission.
|
||||
*
|
||||
* \param [in] fd fd from opening /dev/nvidia-modeset.
|
||||
*
|
||||
* \param [in] device A device returned by allocateDevice().
|
||||
*
|
||||
* \return NV_TRUE on success, NV_FALSE on failure.
|
||||
*/
|
||||
NvBool (*grantSubOwnership)
|
||||
(
|
||||
NvS32 fd,
|
||||
struct NvKmsKapiDevice *device
|
||||
);
|
||||
|
||||
/*!
|
||||
* Revoke sub-owner permissions previously granted.
|
||||
*
|
||||
* \param [in] device A device returned by allocateDevice().
|
||||
*
|
||||
* \return NV_TRUE on success, NV_FALSE on failure.
|
||||
*/
|
||||
NvBool (*revokeSubOwnership)
|
||||
(
|
||||
struct NvKmsKapiDevice *device
|
||||
);
|
||||
|
||||
/*!
|
||||
* Registers for notification, via
|
||||
@@ -1065,6 +1159,21 @@ struct NvKmsKapiFunctionsTable {
|
||||
NvU64 *pPages
|
||||
);
|
||||
|
||||
/*!
|
||||
* Check if this memory object can be scanned out for display.
|
||||
*
|
||||
* \param [in] device A device allocated using allocateDevice().
|
||||
*
|
||||
* \param [in] memory The memory object to check for display support.
|
||||
*
|
||||
* \return NV_TRUE if this memory can be displayed, NV_FALSE if not.
|
||||
*/
|
||||
NvBool (*isMemoryValidForDisplay)
|
||||
(
|
||||
const struct NvKmsKapiDevice *device,
|
||||
const struct NvKmsKapiMemory *memory
|
||||
);
|
||||
|
||||
/*
|
||||
* Import SGT as a memory handle.
|
||||
*
|
||||
@@ -1099,6 +1208,208 @@ struct NvKmsKapiFunctionsTable {
|
||||
NvP64 dmaBuf,
|
||||
NvU32 limit);
|
||||
|
||||
/*!
|
||||
* Import a semaphore surface allocated elsewhere to NVKMS and return a
|
||||
* handle to the new object.
|
||||
*
|
||||
* \param [in] device A device allocated using allocateDevice().
|
||||
*
|
||||
* \param [in] nvKmsParamsUser Userspace pointer to driver-specific
|
||||
* parameters describing the semaphore
|
||||
* surface being imported.
|
||||
*
|
||||
* \param [in] nvKmsParamsSize Size of the driver-specific parameter
|
||||
* struct.
|
||||
*
|
||||
* \param [out] pSemaphoreMap Returns a CPU mapping of the semaphore
|
||||
* surface's semaphore memory to the client.
|
||||
*
|
||||
* \param [out] pMaxSubmittedMap Returns a CPU mapping of the semaphore
|
||||
* surface's semaphore memory to the client.
|
||||
*
|
||||
* \return struct NvKmsKapiSemaphoreSurface* on success, NULL on failure.
|
||||
*/
|
||||
struct NvKmsKapiSemaphoreSurface* (*importSemaphoreSurface)
|
||||
(
|
||||
struct NvKmsKapiDevice *device,
|
||||
NvU64 nvKmsParamsUser,
|
||||
NvU64 nvKmsParamsSize,
|
||||
void **pSemaphoreMap,
|
||||
void **pMaxSubmittedMap
|
||||
);
|
||||
|
||||
/*!
|
||||
* Free an imported semaphore surface.
|
||||
*
|
||||
* \param [in] device The device passed to
|
||||
* importSemaphoreSurface() when creating
|
||||
* semaphoreSurface.
|
||||
*
|
||||
* \param [in] semaphoreSurface A semaphore surface returned by
|
||||
* importSemaphoreSurface().
|
||||
*/
|
||||
void (*freeSemaphoreSurface)
|
||||
(
|
||||
struct NvKmsKapiDevice *device,
|
||||
struct NvKmsKapiSemaphoreSurface *semaphoreSurface
|
||||
);
|
||||
|
||||
/*!
|
||||
* Register a callback to be called when a semaphore reaches a value.
|
||||
*
|
||||
* The callback will be called when the semaphore at index in
|
||||
* semaphoreSurface reaches the value wait_value. The callback will
|
||||
* be called at most once and is automatically unregistered when called.
|
||||
* It may also be unregistered (i.e., cancelled) explicitly using the
|
||||
* unregisterSemaphoreSurfaceCallback() function. To avoid leaking the
|
||||
* memory used to track the registered callback, callers must ensure one
|
||||
* of these methods of unregistration is used for every successful
|
||||
* callback registration that returns a non-NULL pCallbackHandle.
|
||||
*
|
||||
* \param [in] device The device passed to
|
||||
* importSemaphoreSurface() when creating
|
||||
* semaphoreSurface.
|
||||
*
|
||||
* \param [in] semaphoreSurface A semaphore surface returned by
|
||||
* importSemaphoreSurface().
|
||||
*
|
||||
* \param [in] pCallback A pointer to the function to call when
|
||||
* the specified value is reached. NULL
|
||||
* means no callback.
|
||||
*
|
||||
* \param [in] pData Arbitrary data to be passed back to the
|
||||
* callback as its sole parameter.
|
||||
*
|
||||
* \param [in] index The index of the semaphore within
|
||||
* semaphoreSurface.
|
||||
*
|
||||
* \param [in] wait_value The value the semaphore must reach or
|
||||
* exceed before the callback is called.
|
||||
*
|
||||
* \param [in] new_value The value the semaphore will be set to
|
||||
* when it reaches or exceeds <wait_value>.
|
||||
* 0 means do not update the value.
|
||||
*
|
||||
* \param [out] pCallbackHandle On success, the value pointed to will
|
||||
* contain an opaque handle to the
|
||||
* registered callback that may be used to
|
||||
* cancel it if needed. Unused if pCallback
|
||||
* is NULL.
|
||||
*
|
||||
* \return NVKMS_KAPI_REG_WAITER_SUCCESS if the waiter was registered or if
|
||||
* no callback was requested and the semaphore at <index> has
|
||||
* already reached or exceeded <wait_value>
|
||||
*
|
||||
* NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED if a callback was
|
||||
* requested and the semaphore at <index> has already reached or
|
||||
* exceeded <wait_value>
|
||||
*
|
||||
* NVKMS_KAPI_REG_WAITER_FAILED if waiter registration failed.
|
||||
*/
|
||||
NvKmsKapiRegisterWaiterResult
|
||||
(*registerSemaphoreSurfaceCallback)
|
||||
(
|
||||
struct NvKmsKapiDevice *device,
|
||||
struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
|
||||
NvKmsSemaphoreSurfaceCallbackProc *pCallback,
|
||||
void *pData,
|
||||
NvU64 index,
|
||||
NvU64 wait_value,
|
||||
NvU64 new_value,
|
||||
struct NvKmsKapiSemaphoreSurfaceCallback **pCallbackHandle
|
||||
);

    /*!
     * Unregister a callback registered via registerSemaphoreSurfaceCallback()
     *
     * If the callback has not yet been called, this function will cancel the
     * callback and free its associated resources.
     *
     * Note this function treats the callback handle as a pointer. While this
     * function does not dereference that pointer itself, the underlying call
     * to RM does within a properly guarded critical section that first ensures
     * it is not in the process of being used within a callback. This means
     * the callstack must take into consideration that pointers are not in
     * general unique handles if they may have been freed, since a subsequent
     * malloc could return the same pointer value at that point. This callchain
     * avoids that by leveraging the behavior of the underlying RM APIs:
     *
     * 1) A callback handle is referenced relative to its corresponding
     *    (semaphore surface, index, wait_value) tuple here and within RM. It
     *    is not a valid handle outside of that scope.
     *
     * 2) A callback cannot be registered against an already-reached value
     *    for a given semaphore surface index.
     *
     * 3) A given callback handle cannot be registered twice against the same
     *    (semaphore surface, index, wait_value) tuple, so unregistration will
     *    never race with registration at the RM level, and would only race at
     *    a higher level if used incorrectly. Since this is kernel code, we
     *    can safely assume there won't be malicious clients purposely misusing
     *    the API, but the burden is placed on the caller to ensure its usage
     *    does not lead to races at higher levels.
     *
     * These factors considered together ensure any valid registered handle is
     * either still in the relevant waiter list and refers to the same event/
     * callback as when it was registered, or has been removed from the list
     * as part of a critical section that also destroys the list itself and
     * makes future lookups in that list impossible, and hence eliminates the
     * chance of comparing a stale handle with a new handle of the same value
     * as part of a lookup.
     *
     * \param [in]  device            The device passed to
     *                                importSemaphoreSurface() when creating
     *                                semaphoreSurface.
     *
     * \param [in]  semaphoreSurface  The semaphore surface passed to
     *                                registerSemaphoreSurfaceCallback() when
     *                                registering the callback.
     *
     * \param [in]  index             The index passed to
     *                                registerSemaphoreSurfaceCallback() when
     *                                registering the callback.
     *
     * \param [in]  wait_value        The wait_value passed to
     *                                registerSemaphoreSurfaceCallback() when
     *                                registering the callback.
     *
     * \param [in]  callbackHandle    The callback handle returned by
     *                                registerSemaphoreSurfaceCallback().
     */
    NvBool
    (*unregisterSemaphoreSurfaceCallback)
    (
        struct NvKmsKapiDevice *device,
        struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
        NvU64 index,
        NvU64 wait_value,
        struct NvKmsKapiSemaphoreSurfaceCallback *callbackHandle
    );

    /*!
     * Update the value of a semaphore surface from the CPU.
     *
     * Update the semaphore value at the specified index from the CPU, then
     * wake up any pending CPU waiters associated with that index whose wait
     * values are <= the new value.
     */
    NvBool
    (*setSemaphoreSurfaceValue)
    (
        struct NvKmsKapiDevice *device,
        struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
        NvU64 index,
        NvU64 new_value
    );

    /*!
     * Set the callback function for suspending and resuming the display system.
     */
    void
    (*setSuspendResumeCallback)
    (
        NvKmsKapiSuspendResumeCallbackFunc *function
    );
};

/** @} */
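/*
 * Illustrative sketch (not part of this header): a typical lifecycle for the
 * semaphore-surface entry points above, assuming funcs points at a populated
 * struct NvKmsKapiFunctionsTable and the caller has prepared the parameter
 * blob. The onSignal() and example() names are assumptions for this sketch;
 * it also assumes the semaphore never reaches the wait value, so the pending
 * callback must be cancelled explicitly before the surface is freed.
 */
#if 0
static void onSignal(void *pData)
{
    /* Called at most once, from RM's interrupt callchain; must not call
     * back into NVKMS-KAPI or RM. */
}

static void example(struct NvKmsKapiFunctionsTable *funcs,
                    struct NvKmsKapiDevice *device,
                    NvU64 paramsUser, NvU64 paramsSize)
{
    void *semMap, *maxSubmittedMap;
    struct NvKmsKapiSemaphoreSurface *surf;
    struct NvKmsKapiSemaphoreSurfaceCallback *handle;
    NvKmsKapiRegisterWaiterResult res;

    surf = funcs->importSemaphoreSurface(device, paramsUser, paramsSize,
                                         &semMap, &maxSubmittedMap);
    if (surf == NULL)
        return;

    res = funcs->registerSemaphoreSurfaceCallback(device, surf, onSignal,
                                                  NULL /* pData */,
                                                  0    /* index */,
                                                  1    /* wait_value */,
                                                  0    /* no value update */,
                                                  &handle);

    if (res == NVKMS_KAPI_REG_WAITER_SUCCESS) {
        /* Still pending in this sketch: cancel it explicitly so the
         * registration's tracking memory is not leaked. */
        funcs->unregisterSemaphoreSurfaceCallback(device, surf, 0, 1, handle);
    }

    funcs->freeSemaphoreSurface(device, surf);
}
#endif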

@@ -25,7 +25,7 @@

//
// This file was generated with FINN, an NVIDIA coding tool.
// Source file: nvlimits.finn
// Source file: nvlimits.finn
//

@@ -919,6 +919,9 @@ static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address)
//
#define NV_BIT_SET_128(b, lo, hi) { nvAssert( (b) < 128 ); if ( (b) < 64 ) (lo) |= NVBIT64(b); else (hi) |= NVBIT64( b & 0x3F ); }

// Get the number of elements in the specified fixed-size array
#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))
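/*
 * Illustrative sketch (not part of this header): exercising the two helpers
 * above. NV_BIT_SET_128 splits a 0..127 bit index across a (lo, hi) pair of
 * 64-bit words; NV_ARRAY_ELEMENTS only works on true arrays, not pointers.
 * The example() name is an assumption made for this sketch.
 */
#if 0
static void example(void)
{
    NvU64 lo = 0, hi = 0;
    NvU32 codes[8];

    NV_BIT_SET_128(3, lo, hi);   /* bit index < 64: sets bit 3 of lo */
    NV_BIT_SET_128(70, lo, hi);  /* bit index >= 64: sets bit 6 of hi */

    nvAssert(NV_ARRAY_ELEMENTS(codes) == 8);
}
#endif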

#ifdef __cplusplus
}
#endif //__cplusplus

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -149,6 +149,11 @@ NV_STATUS_CODE(NV_ERR_NVLINK_TRAINING_ERROR, 0x00000077, "Nvlink Train
NV_STATUS_CODE(NV_ERR_NVLINK_CONFIGURATION_ERROR, 0x00000078, "Nvlink Configuration Error")
NV_STATUS_CODE(NV_ERR_RISCV_ERROR, 0x00000079, "Generic RISC-V assert or halt")
NV_STATUS_CODE(NV_ERR_FABRIC_MANAGER_NOT_PRESENT, 0x0000007A, "Fabric Manager is not loaded")
NV_STATUS_CODE(NV_ERR_ALREADY_SIGNALLED, 0x0000007B, "Semaphore Surface value already >= requested wait value")
NV_STATUS_CODE(NV_ERR_QUEUE_TASK_SLOT_NOT_AVAILABLE, 0x0000007C, "PMU RPC error due to no queue slot available for this event")
NV_STATUS_CODE(NV_ERR_KEY_ROTATION_IN_PROGRESS, 0x0000007D, "Operation not allowed as key rotation is in progress")
NV_STATUS_CODE(NV_ERR_NVSWITCH_FABRIC_NOT_READY, 0x00000081, "Nvswitch Fabric Status or Fabric Probe is not yet complete, caller needs to retry")
NV_STATUS_CODE(NV_ERR_NVSWITCH_FABRIC_FAILURE, 0x00000082, "Nvswitch Fabric Probe failed")

// Warnings:
NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch")

@@ -145,7 +145,12 @@ typedef signed short NvS16; /* -32768 to 32767 */
#endif

// Macro to build an NvU32 from four bytes, listed from msb to lsb
#define NvU32_BUILD(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
#define NvU32_BUILD(a, b, c, d)       \
    ((NvU32)(                         \
        (((NvU32)(a) & 0xff) << 24) | \
        (((NvU32)(b) & 0xff) << 16) | \
        (((NvU32)(c) & 0xff) << 8)  | \
        (((NvU32)(d) & 0xff))))
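/*
 * Illustrative note (not part of this header): the masking added above keeps
 * bytes with the sign bit set from smearing into the higher lanes. With the
 * old form, a char argument such as 0x80 promoted to int could sign-extend
 * when shifted and OR'd; now each argument is truncated to 8 bits before
 * shifting. A worked example:
 *
 *     NvU32_BUILD(0xDE, 0xAD, 0xBE, 0xEF) == 0xDEADBEEF
 */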

#if NVTYPES_USE_STDINT
typedef uint32_t NvV32; /* "void": enumerated or multiple fields */
@@ -513,6 +518,12 @@ typedef struct
// place to re-locate these from nvos.h which cannot be included by a number
// of builds that need them

#if defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER)
#define NV_ATTRIBUTE_UNUSED __attribute__((__unused__))
#else
#define NV_ATTRIBUTE_UNUSED
#endif

#if defined(_MSC_VER)

#if _MSC_VER >= 1310
@@ -536,8 +547,6 @@ typedef struct

#define NV_FORCERESULTCHECK

#define NV_ATTRIBUTE_UNUSED

#define NV_FORMAT_PRINTF(_f, _a)

#else // ! defined(_MSC_VER)
@@ -635,12 +644,6 @@ typedef struct
#define NV_FORCERESULTCHECK
#endif

#if defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER)
#define NV_ATTRIBUTE_UNUSED __attribute__((__unused__))
#else
#define NV_ATTRIBUTE_UNUSED
#endif

/*
 * Functions decorated with NV_FORMAT_PRINTF(f, a) have a format string at
 * parameter number 'f' and variadic arguments start at parameter number 'a'.

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -67,7 +67,6 @@ typedef struct os_wait_queue os_wait_queue;
 * ---------------------------------------------------------------------------
 */

NvU64 NV_API_CALL os_get_num_phys_pages (void);
NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64);
void NV_API_CALL os_free_mem (void *);
NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *);
@@ -105,7 +104,6 @@ void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32);
void NV_API_CALL os_unmap_kernel_space (void *, NvU64);
void* NV_API_CALL os_map_user_space (NvU64, NvU64, NvU32, NvU32, void **);
void NV_API_CALL os_unmap_user_space (void *, NvU64, void *);
NV_STATUS NV_API_CALL os_flush_cpu_cache (void);
NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void);
NV_STATUS NV_API_CALL os_flush_user_cache (void);
void NV_API_CALL os_flush_cpu_write_combine_buffer(void);
@@ -162,10 +160,9 @@ NvBool NV_API_CALL os_is_vgx_hyper (void);
NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
NvBool NV_API_CALL os_is_grid_supported (void);
NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_get_screen_info (NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **, void**);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
@@ -181,7 +178,6 @@ NV_STATUS NV_API_CALL os_put_page (NvU64 address);
NvU32 NV_API_CALL os_get_page_refcount (NvU64 address);
NvU32 NV_API_CALL os_count_tail_pages (NvU64 address);
void NV_API_CALL os_free_pages_phys (NvU64, NvU32);
NV_STATUS NV_API_CALL os_call_nv_vmbus (NvU32, void *);
NV_STATUS NV_API_CALL os_open_temporary_file (void **);
void NV_API_CALL os_close_file (void *);
NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64);
@@ -189,7 +185,7 @@ NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU
NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **);
NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64);
NvBool NV_API_CALL os_is_nvswitch_present (void);
void NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **);
void NV_API_CALL os_free_wait_queue (os_wait_queue *);
void NV_API_CALL os_wait_uninterruptible (os_wait_queue *);
@@ -201,6 +197,8 @@ nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *,
void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int);
void NV_API_CALL os_nv_cap_close_fd (int);
NvS32 NV_API_CALL os_imex_channel_get (NvU64);
NvS32 NV_API_CALL os_imex_channel_count (void);

enum os_pci_req_atomics_type {
    OS_INTF_PCIE_REQ_ATOMICS_32BIT,
@@ -208,13 +206,23 @@ enum os_pci_req_atomics_type {
    OS_INTF_PCIE_REQ_ATOMICS_128BIT
};
NV_STATUS NV_API_CALL os_enable_pci_req_atomics (void *, enum os_pci_req_atomics_type);
NV_STATUS NV_API_CALL os_get_numa_node_memory_usage (NvS32, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL os_numa_add_gpu_memory (void *, NvU64, NvU64, NvU32 *);
NV_STATUS NV_API_CALL os_numa_remove_gpu_memory (void *, NvU64, NvU64, NvU32);
NV_STATUS NV_API_CALL os_offline_page_at_address(NvU64 address);
void* NV_API_CALL os_get_pid_info(void);
void NV_API_CALL os_put_pid_info(void *pid_info);
NV_STATUS NV_API_CALL os_find_ns_pid(void *pid_info, NvU32 *ns_pid);

extern NvU32 os_page_size;
extern NvU64 os_page_mask;
extern NvU8 os_page_shift;
extern NvU32 os_sev_status;
extern NvBool os_sev_enabled;
extern NvBool os_cc_enabled;
extern NvBool os_cc_sev_snp_enabled;
extern NvBool os_cc_snp_vtom_enabled;
extern NvBool os_cc_tdx_enabled;
extern NvBool os_dma_buf_enabled;
extern NvBool os_imex_channel_is_supported;

/*
 * ---------------------------------------------------------------------------

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -37,7 +37,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_create_session (nvidia_stack_t *, nvgpuSessio
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_session (nvidia_stack_t *, nvgpuSessionHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_device_create (nvidia_stack_t *, nvgpuSessionHandle_t, const nvgpuInfo_t *, const NvProcessorUuid *, nvgpuDeviceHandle_t *, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_device_destroy (nvidia_stack_t *, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create(nvidia_stack_t *, nvgpuDeviceHandle_t, unsigned long long, unsigned long long, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create(nvidia_stack_t *, nvgpuDeviceHandle_t, unsigned long long, unsigned long long, NvBool, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_dup_address_space(nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *, nvgpuAddressSpaceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvLength, NvU64 *, nvgpuAllocInfo_t);
@@ -45,7 +45,6 @@ NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(nvidia_stack_t *, nvgpuAddres
NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages(nvidia_stack_t *, void *, NvLength, NvU32 , nvgpuPmaAllocationOptions_t, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_free_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_unpin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_get_pma_object(nvidia_stack_t *, nvgpuDeviceHandle_t, void **, const nvgpuPmaStatistics_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_register_callbacks(nvidia_stack_t *sp, void *, nvPmaEvictPagesCallback, nvPmaEvictRangeCallback, void *);
void NV_API_CALL rm_gpu_ops_pma_unregister_callbacks(nvidia_stack_t *sp, void *);
@@ -56,7 +55,9 @@ NV_STATUS NV_API_CALL rm_gpu_ops_get_p2p_caps(nvidia_stack_t *, nvgpuDeviceHan

NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_map(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, NvLength, void **, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_ummap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, void*);
NV_STATUS NV_API_CALL rm_gpu_ops_channel_allocate(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, const nvgpuChannelAllocParams_t *, nvgpuChannelHandle_t *, nvgpuChannelInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_tsg_allocate(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, const nvgpuTsgAllocParams_t *, nvgpuTsgHandle_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_tsg_destroy(nvidia_stack_t *, nvgpuTsgHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_channel_allocate(nvidia_stack_t *, const nvgpuTsgHandle_t, const nvgpuChannelAllocParams_t *, nvgpuChannelHandle_t *, nvgpuChannelInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_channel_destroy(nvidia_stack_t *, nvgpuChannelHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_memory_free(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64);
NV_STATUS NV_API_CALL rm_gpu_ops_query_caps(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuCaps_t);
@@ -74,8 +75,10 @@ NV_STATUS NV_API_CALL rm_gpu_ops_own_page_fault_intr(nvidia_stack_t *, nvgpuDevi
NV_STATUS NV_API_CALL rm_gpu_ops_init_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, void *, NvU32 *);
NV_STATUS NV_API_CALL rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_toggle_prefetch_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool *);
NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_own_access_cntr_intr(nvidia_stack_t *, nvgpuSessionHandle_t, nvgpuAccessCntrInfo_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, nvgpuAccessCntrConfig_t);
@@ -98,4 +101,16 @@ NV_STATUS NV_API_CALL rm_gpu_ops_paging_channels_map(nvidia_stack_t *, nvgpuAdd
void NV_API_CALL rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *, nvgpuPagingChannelHandle_t, char *, NvU32);

NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_init(nvidia_stack_t *, struct ccslContext_t **, nvgpuChannelHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *, struct ccslContext_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_key(nvidia_stack_t *, UvmCslContext *[], NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt_with_iv(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8*, NvU8 *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 const *, NvU32, NvU8 *, NvU8 const *, NvU32, NvU8 const *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_sign(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_query_message_pool(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_encryption(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU32);

#endif

(File diff suppressed because it is too large)

kernel-open/header-presence-tests.mk (103 lines, new file)
@@ -0,0 +1,103 @@
# Each of these headers is checked for presence with a test #include; a
# corresponding #define will be generated in conftest/headers.h.
NV_HEADER_PRESENCE_TESTS = \
  asm/system.h \
  drm/drmP.h \
  drm/drm_aperture.h \
  drm/drm_auth.h \
  drm/drm_gem.h \
  drm/drm_crtc.h \
  drm/drm_color_mgmt.h \
  drm/drm_atomic.h \
  drm/drm_atomic_helper.h \
  drm/drm_atomic_state_helper.h \
  drm/drm_encoder.h \
  drm/drm_atomic_uapi.h \
  drm/drm_drv.h \
  drm/drm_fbdev_generic.h \
  drm/drm_fbdev_ttm.h \
  drm/drm_framebuffer.h \
  drm/drm_connector.h \
  drm/drm_probe_helper.h \
  drm/drm_blend.h \
  drm/drm_fourcc.h \
  drm/drm_prime.h \
  drm/drm_plane.h \
  drm/drm_vblank.h \
  drm/drm_file.h \
  drm/drm_ioctl.h \
  drm/drm_device.h \
  drm/drm_mode_config.h \
  drm/drm_modeset_lock.h \
  dt-bindings/interconnect/tegra_icc_id.h \
  generated/autoconf.h \
  generated/compile.h \
  generated/utsrelease.h \
  linux/efi.h \
  linux/kconfig.h \
  linux/platform/tegra/mc_utils.h \
  linux/printk.h \
  linux/ratelimit.h \
  linux/prio_tree.h \
  linux/log2.h \
  linux/of.h \
  linux/bug.h \
  linux/sched.h \
  linux/sched/mm.h \
  linux/sched/signal.h \
  linux/sched/task.h \
  linux/sched/task_stack.h \
  xen/ioemu.h \
  linux/fence.h \
  linux/dma-fence.h \
  linux/dma-resv.h \
  soc/tegra/chip-id.h \
  soc/tegra/fuse.h \
  soc/tegra/tegra_bpmp.h \
  video/nv_internal.h \
  linux/platform/tegra/dce/dce-client-ipc.h \
  linux/nvhost.h \
  linux/nvhost_t194.h \
  linux/host1x-next.h \
  asm/book3s/64/hash-64k.h \
  asm/set_memory.h \
  asm/prom.h \
  asm/powernv.h \
  linux/atomic.h \
  asm/barrier.h \
  asm/opal-api.h \
  sound/hdaudio.h \
  asm/pgtable_types.h \
  asm/page.h \
  linux/stringhash.h \
  linux/dma-map-ops.h \
  rdma/peer_mem.h \
  sound/hda_codec.h \
  linux/dma-buf.h \
  linux/time.h \
  linux/platform_device.h \
  linux/mutex.h \
  linux/reset.h \
  linux/of_platform.h \
  linux/of_device.h \
  linux/of_gpio.h \
  linux/gpio.h \
  linux/gpio/consumer.h \
  linux/interconnect.h \
  linux/pm_runtime.h \
  linux/clk.h \
  linux/clk-provider.h \
  linux/ioasid.h \
  linux/stdarg.h \
  linux/iosys-map.h \
  asm/coco.h \
  linux/vfio_pci_core.h \
  linux/mdev.h \
  soc/tegra/bpmp-abi.h \
  soc/tegra/bpmp.h \
  linux/sync_file.h \
  linux/cc_platform.h \
  asm/cpufeature.h \
  linux/mpi.h \
  asm/mshyperv.h
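# Illustrative note (not part of this makefile): for each header found, the
# conftest machinery emits a matching *_PRESENT define into conftest/headers.h,
# which the sources then test. For example, listing linux/bug.h above is what
# makes this pattern (used in nv-kthread-q.c below) work:
#
#   #if defined(NV_LINUX_BUG_H_PRESENT)
#   #include <linux/bug.h>
#   #else
#   #include <asm/bug.h>
#   #endif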

kernel-open/nvidia-drm/nv-kthread-q.c (334 lines, new file)
@@ -0,0 +1,334 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nv-kthread-q.h"
#include "nv-list-helpers.h"

#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>

#if defined(NV_LINUX_BUG_H_PRESENT)
#include <linux/bug.h>
#else
#include <asm/bug.h>
#endif

// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:
//
// 1. Each nv_kthread_q instance is a first-in, first-out queue.
//
// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
//
// You can create any number of queues, each of which gets its own
// named kernel thread (kthread). You can then insert arbitrary functions
// into the queue, and those functions will be run in the context of the
// queue's kthread.
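//
// Illustrative sketch (not part of this file): minimal use of the queue API
// defined in nv-kthread-q.h. The do_work() and example() names are
// assumptions made for this sketch only.
//
#if 0
static void do_work(void *args)
{
    // Runs in the queue's kthread context, serialized FIFO with any
    // other items scheduled on the same queue.
}

static int example(void)
{
    static nv_kthread_q_t q;
    static nv_kthread_q_item_t item;
    int ret = nv_kthread_q_init(&q, "nvq_example");

    if (ret != 0)
        return ret;

    nv_kthread_q_item_init(&item, do_work, NULL);
    nv_kthread_q_schedule_q_item(&q, &item);

    // Flushes all previously scheduled items (twice, to cover
    // self-rescheduling items), then stops the kthread.
    nv_kthread_q_stop(&q);
    return 0;
}
#endif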

#ifndef WARN
// Only *really* old kernels (2.6.9) end up here. Just use a simple printk
// to implement this, because such kernels won't be supported much longer.
#define WARN(condition, format...) ({          \
    int __ret_warn_on = !!(condition);         \
    if (unlikely(__ret_warn_on))               \
        printk(KERN_ERR format);               \
    unlikely(__ret_warn_on);                   \
})
#endif

#define NVQ_WARN(fmt, ...)                                \
    do {                                                  \
        if (in_interrupt()) {                             \
            WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \
                 ##__VA_ARGS__);                          \
        }                                                 \
        else {                                            \
            WARN(1, "nv_kthread_q: task: %s: " fmt,       \
                 current->comm,                           \
                 ##__VA_ARGS__);                          \
        }                                                 \
    } while (0)

static int _main_loop(void *args)
{
    nv_kthread_q_t *q = (nv_kthread_q_t *)args;
    nv_kthread_q_item_t *q_item = NULL;
    unsigned long flags;

    while (1) {
        // Normally this thread is never interrupted. However,
        // down_interruptible (instead of down) is called here,
        // in order to avoid being classified as a potentially
        // hung task, by the kernel watchdog.
        while (down_interruptible(&q->q_sem))
            NVQ_WARN("Interrupted during semaphore wait\n");

        if (atomic_read(&q->main_loop_should_exit))
            break;

        spin_lock_irqsave(&q->q_lock, flags);

        // The q_sem semaphore prevents us from getting here unless there is
        // at least one item in the list, so an empty list indicates a bug.
        if (unlikely(list_empty(&q->q_list_head))) {
            spin_unlock_irqrestore(&q->q_lock, flags);
            NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q);
            continue;
        }

        // Consume one item from the queue
        q_item = list_first_entry(&q->q_list_head,
                                  nv_kthread_q_item_t,
                                  q_list_node);

        list_del_init(&q_item->q_list_node);

        spin_unlock_irqrestore(&q->q_lock, flags);

        // Run the item
        q_item->function_to_run(q_item->function_args);

        // Make debugging a little simpler by clearing this between runs:
        q_item = NULL;
    }

    while (!kthread_should_stop())
        schedule();

    return 0;
}

void nv_kthread_q_stop(nv_kthread_q_t *q)
{
    // check if queue has been properly initialized
    if (unlikely(!q->q_kthread))
        return;

    nv_kthread_q_flush(q);

    // If this assertion fires, then a caller likely either broke the API rules,
    // by adding items after calling nv_kthread_q_stop, or possibly messed up
    // with inadequate flushing of self-rescheduling q_items.
    if (unlikely(!list_empty(&q->q_list_head)))
        NVQ_WARN("list not empty after flushing\n");

    if (likely(!atomic_read(&q->main_loop_should_exit))) {

        atomic_set(&q->main_loop_should_exit, 1);

        // Wake up the kthread so that it can see that it needs to stop:
        up(&q->q_sem);

        kthread_stop(q->q_kthread);
        q->q_kthread = NULL;
    }
}

// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by
// kthread_create_on_node relies on a 2 entry, per-core cache to minimize
// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the
// stack location ends up being a function of the core assigned to the current
// thread, instead of being a function of the specified NUMA node. The cache was
// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0
// ("fork: Optimize task creation by caching two thread stacks per CPU if
// CONFIG_VMAP_STACK=y")
//
// To work around the problematic cache, we create up to three kernel threads:
//  - If the first thread's stack is resident on the preferred node, return
//    this thread.
//  - Otherwise, create a second thread. If its stack is resident on the
//    preferred node, stop the first thread and return this one.
//  - Otherwise, create a third thread. The stack allocator does not find a
//    cached stack, and so falls back to vmalloc, which takes the NUMA hint
//    into consideration. The first two threads are then stopped.
//
// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned.
//
// This function is never invoked when there is no NUMA preference (preferred
// node is NUMA_NO_NODE).
static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
                                                 nv_kthread_q_t *q,
                                                 int preferred_node,
                                                 const char *q_name)
{

    unsigned i, j;
    static const unsigned attempts = 3;
    struct task_struct *thread[3];

    for (i = 0;; i++) {
        struct page *stack;

        thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name);

        if (unlikely(IS_ERR(thread[i]))) {

            // Instead of failing, pick the previous thread, even if its
            // stack is not allocated on the preferred node.
            if (i > 0)
                i--;

            break;
        }

        // vmalloc is not used to allocate the stack, so simply return the
        // thread, even if its stack may not be allocated on the preferred node
        if (!is_vmalloc_addr(thread[i]->stack))
            break;

        // Ran out of attempts - return thread even if its stack may not be
        // allocated on the preferred node
        if (i == (attempts - 1))
            break;

        // Get the NUMA node where the first page of the stack is resident. If
        // it is the preferred node, select this thread.
        stack = vmalloc_to_page(thread[i]->stack);
        if (page_to_nid(stack) == preferred_node)
            break;
    }

    for (j = i; j > 0; j--)
        kthread_stop(thread[j - 1]);

    return thread[i];
}

int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node)
{
    memset(q, 0, sizeof(*q));

    INIT_LIST_HEAD(&q->q_list_head);
    spin_lock_init(&q->q_lock);
    sema_init(&q->q_sem, 0);

    if (preferred_node == NV_KTHREAD_NO_NODE) {
        q->q_kthread = kthread_create(_main_loop, q, q_name);
    }
    else {
        q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name);
    }

    if (IS_ERR(q->q_kthread)) {
        int err = PTR_ERR(q->q_kthread);

        // Clear q_kthread before returning so that nv_kthread_q_stop() can be
        // safely called on it making error handling easier.
        q->q_kthread = NULL;

        return err;
    }

    wake_up_process(q->q_kthread);

    return 0;
}

int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
    return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}

// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
{
    unsigned long flags;
    int ret = 1;

    spin_lock_irqsave(&q->q_lock, flags);

    if (likely(list_empty(&q_item->q_list_node)))
        list_add_tail(&q_item->q_list_node, &q->q_list_head);
    else
        ret = 0;

    spin_unlock_irqrestore(&q->q_lock, flags);

    if (likely(ret))
        up(&q->q_sem);

    return ret;
}

void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
                            nv_q_func_t function_to_run,
                            void *function_args)
{
    INIT_LIST_HEAD(&q_item->q_list_node);
    q_item->function_to_run = function_to_run;
    q_item->function_args = function_args;
}

// Returns true (non-zero) if the q_item got scheduled, false otherwise.
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
                                 nv_kthread_q_item_t *q_item)
{
    if (unlikely(atomic_read(&q->main_loop_should_exit))) {
        NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was "
                 "called with a non-alive q: 0x%p\n", q);
        return 0;
    }

    return _raw_q_schedule(q, q_item);
}

static void _q_flush_function(void *args)
{
    struct completion *completion = (struct completion *)args;
    complete(completion);
}

static void _raw_q_flush(nv_kthread_q_t *q)
{
    nv_kthread_q_item_t q_item;
    DECLARE_COMPLETION_ONSTACK(completion);

    nv_kthread_q_item_init(&q_item, _q_flush_function, &completion);

    _raw_q_schedule(q, &q_item);

    // Wait for the flush item to run. Once it has run, then all of the
    // previously queued items in front of it will have run, so that means
    // the flush is complete.
    wait_for_completion(&completion);
}

void nv_kthread_q_flush(nv_kthread_q_t *q)
{
    if (unlikely(atomic_read(&q->main_loop_should_exit))) {
        NVQ_WARN("Not allowed: nv_kthread_q_flush was called after "
                 "nv_kthread_q_stop. q: 0x%p\n", q);
        return;
    }

    // This 2x flush is not a typing mistake. The queue really does have to be
    // flushed twice, in order to take care of the case of a q_item that
    // reschedules itself.
    _raw_q_flush(q);
    _raw_q_flush(q);
}

@@ -25,6 +25,15 @@
#include <linux/module.h>

#include "nv-pci-table.h"
#include "cpuopsys.h"

#if defined(NV_BSD)
/* Define PCI classes that FreeBSD's linuxkpi is missing */
#define PCI_VENDOR_ID_NVIDIA 0x10de
#define PCI_CLASS_DISPLAY_VGA 0x0300
#define PCI_CLASS_DISPLAY_3D 0x0302
#define PCI_CLASS_BRIDGE_OTHER 0x0680
#endif

/* Devices supported by RM */
struct pci_device_id nv_pci_table[] = {
@@ -48,7 +57,7 @@ struct pci_device_id nv_pci_table[] = {
};

/* Devices supported by all drivers in nvidia.ko */
struct pci_device_id nv_module_device_table[] = {
struct pci_device_id nv_module_device_table[4] = {
    {
        .vendor = PCI_VENDOR_ID_NVIDIA,
        .device = PCI_ANY_ID,
@@ -76,4 +85,6 @@ struct pci_device_id nv_module_device_table[] = {
    { }
};

#if defined(NV_LINUX)
MODULE_DEVICE_TABLE(pci, nv_module_device_table);
#endif

@@ -27,5 +27,6 @@
#include <linux/pci.h>

extern struct pci_device_id nv_pci_table[];
extern struct pci_device_id nv_module_device_table[4];

#endif /* _NV_PCI_TABLE_H_ */
@@ -43,9 +43,13 @@
#if defined(NV_LINUX_FENCE_H_PRESENT)
typedef struct fence nv_dma_fence_t;
typedef struct fence_ops nv_dma_fence_ops_t;
typedef struct fence_cb nv_dma_fence_cb_t;
typedef fence_func_t nv_dma_fence_func_t;
#else
typedef struct dma_fence nv_dma_fence_t;
typedef struct dma_fence_ops nv_dma_fence_ops_t;
typedef struct dma_fence_cb nv_dma_fence_cb_t;
typedef dma_fence_func_t nv_dma_fence_func_t;
#endif

#if defined(NV_LINUX_FENCE_H_PRESENT)
@@ -97,6 +101,14 @@ static inline int nv_dma_fence_signal(nv_dma_fence_t *fence) {
#endif
}

static inline int nv_dma_fence_signal_locked(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_signal_locked(fence);
#else
    return dma_fence_signal_locked(fence);
#endif
}

static inline u64 nv_dma_fence_context_alloc(unsigned num) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_context_alloc(num);
@@ -108,7 +120,7 @@ static inline u64 nv_dma_fence_context_alloc(unsigned num) {
static inline void
nv_dma_fence_init(nv_dma_fence_t *fence,
                  const nv_dma_fence_ops_t *ops,
                  spinlock_t *lock, u64 context, unsigned seqno) {
                  spinlock_t *lock, u64 context, uint64_t seqno) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    fence_init(fence, ops, lock, context, seqno);
#else
@@ -116,6 +128,29 @@ nv_dma_fence_init(nv_dma_fence_t *fence,
#endif
}

static inline void
nv_dma_fence_set_error(nv_dma_fence_t *fence,
                       int error) {
#if defined(NV_DMA_FENCE_SET_ERROR_PRESENT)
    return dma_fence_set_error(fence, error);
#elif defined(NV_FENCE_SET_ERROR_PRESENT)
    return fence_set_error(fence, error);
#else
    fence->status = error;
#endif
}

static inline int
nv_dma_fence_add_callback(nv_dma_fence_t *fence,
                          nv_dma_fence_cb_t *cb,
                          nv_dma_fence_func_t func) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_add_callback(fence, cb, func);
#else
    return dma_fence_add_callback(fence, cb, func);
#endif
}

#endif /* defined(NV_DRM_FENCE_AVAILABLE) */

#endif /* __NVIDIA_DMA_FENCE_HELPER_H__ */

@@ -121,6 +121,20 @@ static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
#endif
}

static inline void nv_dma_resv_add_shared_fence(nv_dma_resv_t *obj,
                                                nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
    dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_READ);
#else
    dma_resv_add_shared_fence(obj, fence);
#endif
#else
    reservation_object_add_shared_fence(obj, fence);
#endif
}

#endif /* defined(NV_DRM_FENCE_AVAILABLE) */

#endif /* __NVIDIA_DMA_RESV_HELPER_H__ */
@@ -24,6 +24,7 @@
#define __NVIDIA_DRM_CONFTEST_H__

#include "conftest.h"
#include "nvtypes.h"

/*
 * NOTE: This file is expected to get included at the top before including any
@@ -61,4 +62,132 @@
#undef NV_DRM_FENCE_AVAILABLE
#endif

/*
 * We can support color management if either drm_helper_crtc_enable_color_mgmt()
 * or drm_crtc_enable_color_mgmt() exists.
 */
#if defined(NV_DRM_HELPER_CRTC_ENABLE_COLOR_MGMT_PRESENT) || \
    defined(NV_DRM_CRTC_ENABLE_COLOR_MGMT_PRESENT)
#define NV_DRM_COLOR_MGMT_AVAILABLE
#else
#undef NV_DRM_COLOR_MGMT_AVAILABLE
#endif

/*
 * Adapt to quirks in FreeBSD's Linux kernel compatibility layer.
 */
#if defined(NV_BSD)

#include <linux/rwsem.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

/* For nv_drm_gem_prime_force_fence_signal */
#ifndef spin_is_locked
#define spin_is_locked(lock) mtx_owned(lock.m)
#endif

#ifndef rwsem_is_locked
#define rwsem_is_locked(sem) (((sem)->sx.sx_lock & (SX_LOCK_SHARED)) \
                              || ((sem)->sx.sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)))
#endif

/*
 * FreeBSD does not define vm_flags_t in its linuxkpi, since there is already
 * a FreeBSD vm_flags_t (of a different size) and they don't want the names to
 * collide. Temporarily redefine it when including nv-mm.h.
 */
#define vm_flags_t unsigned long
#include "nv-mm.h"
#undef vm_flags_t

/*
 * sys/nv.h and nvidia/nv.h have the same header guard;
 * we need to clear it for nvlist_t to get loaded.
 */
#undef _NV_H_
#include <sys/nv.h>

/*
 * For now just use set_page_dirty as the lock variant
 * is not ported for FreeBSD (in progress). This calls
 * vm_page_dirty. Used in nv-mm.h.
 */
#define set_page_dirty_lock set_page_dirty

/*
 * FreeBSD does not implement drm_atomic_state_free; simply
 * default to drm_atomic_state_put.
 */
#define drm_atomic_state_free drm_atomic_state_put

#if __FreeBSD_version < 1300000
/* redefine LIST_HEAD_INIT to the linux version */
#include <linux/list.h>
#define LIST_HEAD_INIT(name) LINUX_LIST_HEAD_INIT(name)
#endif

/*
 * FreeBSD currently has only vmf_insert_pfn_prot defined, and it has a
 * static assert warning not to use it since all of DRM's usages are in
 * loops with the vm obj lock(s) held. Instead we should use the lkpi
 * function itself directly. For us none of this applies, so we can just
 * wrap it in our own definition of vmf_insert_pfn.
 */
#ifndef NV_VMF_INSERT_PFN_PRESENT
#define NV_VMF_INSERT_PFN_PRESENT 1

#if __FreeBSD_version < 1300000
#define VM_SHARED (1 << 17)

/* Not present in 12.2 */
static inline vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
                                unsigned long pfn, pgprot_t prot)
{
    vm_object_t vm_obj = vma->vm_obj;
    vm_page_t page;
    vm_pindex_t pindex;

    VM_OBJECT_ASSERT_WLOCKED(vm_obj);
    pindex = OFF_TO_IDX(addr - vma->vm_start);
    if (vma->vm_pfn_count == 0)
        vma->vm_pfn_first = pindex;
    MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

    page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NORMAL);
    if (page == NULL) {
        page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
        vm_page_xbusy(page);
        if (vm_page_insert(page, vm_obj, pindex)) {
            vm_page_xunbusy(page);
            return (VM_FAULT_OOM);
        }
        page->valid = VM_PAGE_BITS_ALL;
    }
    pmap_page_set_memattr(page, pgprot2cachemode(prot));
    vma->vm_pfn_count++;

    return (VM_FAULT_NOPAGE);
}
#endif

static inline vm_fault_t
vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
               unsigned long pfn)
{
    vm_fault_t ret;

    VM_OBJECT_WLOCK(vma->vm_obj);
    ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, vma->vm_page_prot);
    VM_OBJECT_WUNLOCK(vma->vm_obj);

    return (ret);
}

#endif

#endif /* defined(NV_BSD) */

#endif /* defined(__NVIDIA_DRM_CONFTEST_H__) */
@@ -27,6 +27,7 @@
|
||||
#include "nvidia-drm-helper.h"
|
||||
#include "nvidia-drm-priv.h"
|
||||
#include "nvidia-drm-connector.h"
|
||||
#include "nvidia-drm-crtc.h"
|
||||
#include "nvidia-drm-utils.h"
|
||||
#include "nvidia-drm-encoder.h"
|
||||
|
||||
@@ -42,6 +43,7 @@
|
||||
|
||||
#include <drm/drm_atomic.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_edid.h>
|
||||
|
||||
static void nv_drm_connector_destroy(struct drm_connector *connector)
|
||||
{
|
||||
@@ -98,7 +100,11 @@ __nv_drm_detect_encoder(struct NvKmsKapiDynamicDisplayParams *pDetectParams,
|
||||
break;
|
||||
}
|
||||
|
||||
#if defined(NV_DRM_CONNECTOR_HAS_OVERRIDE_EDID)
|
||||
if (connector->override_edid) {
|
||||
#else
|
||||
if (drm_edid_override_connector_update(connector) > 0) {
|
||||
#endif
|
||||
const struct drm_property_blob *edid = connector->edid_blob_ptr;
|
||||
|
||||
if (edid->length <= sizeof(pDetectParams->edid.buffer)) {
|
||||
@@ -202,6 +208,11 @@ done:
|
||||
|
||||
nv_drm_free(pDetectParams);
|
||||
|
||||
if (status == connector_status_disconnected &&
|
||||
nv_connector->modeset_permission_filep) {
|
||||
nv_drm_connector_revoke_permissions(dev, nv_connector);
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
@@ -338,10 +349,125 @@ nv_drm_connector_best_encoder(struct drm_connector *connector)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
|
||||
static const NvU32 __nv_drm_connector_supported_colorspaces =
|
||||
BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
|
||||
BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
|
||||
#endif
|
||||
|
||||
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
|
||||
static int
|
||||
__nv_drm_connector_atomic_check(struct drm_connector *connector,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct drm_connector_state *new_connector_state =
|
||||
drm_atomic_get_new_connector_state(state, connector);
|
||||
struct drm_connector_state *old_connector_state =
|
||||
drm_atomic_get_old_connector_state(state, connector);
|
||||
struct nv_drm_device *nv_dev = to_nv_device(connector->dev);
|
||||
|
||||
struct drm_crtc *crtc = new_connector_state->crtc;
|
||||
struct drm_crtc_state *crtc_state;
|
||||
struct nv_drm_crtc_state *nv_crtc_state;
|
||||
struct NvKmsKapiHeadRequestedConfig *req_config;
|
||||
|
||||
if (!crtc) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
|
||||
nv_crtc_state = to_nv_crtc_state(crtc_state);
|
||||
req_config = &nv_crtc_state->req_config;
|
||||
|
||||
/*
|
||||
* Override metadata for the entire head instead of allowing NVKMS to derive
|
||||
* it from the layers' metadata.
|
||||
*
|
||||
* This is the metadata that will sent to the display, and if applicable,
|
||||
* layers will be tone mapped to this metadata rather than that of the
|
||||
* display.
|
||||
*/
|
||||
    req_config->flags.hdrInfoFrameChanged =
        !drm_connector_atomic_hdr_metadata_equal(old_connector_state,
                                                 new_connector_state);
    if (new_connector_state->hdr_output_metadata &&
        new_connector_state->hdr_output_metadata->data) {

        /*
         * Note that HDMI definitions are used here even though we might not
         * be using HDMI. While that seems odd, it is consistent with
         * upstream behavior.
         */

        struct hdr_output_metadata *hdr_metadata =
            new_connector_state->hdr_output_metadata->data;
        struct hdr_metadata_infoframe *info_frame =
            &hdr_metadata->hdmi_metadata_type1;
        unsigned int i;

        if (hdr_metadata->metadata_type != HDMI_STATIC_METADATA_TYPE1) {
            return -EINVAL;
        }

        for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i++) {
            req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].x =
                info_frame->display_primaries[i].x;
            req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].y =
                info_frame->display_primaries[i].y;
        }

        req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.x =
            info_frame->white_point.x;
        req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.y =
            info_frame->white_point.y;
        req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxDisplayMasteringLuminance =
            info_frame->max_display_mastering_luminance;
        req_config->modeSetConfig.hdrInfoFrame.staticMetadata.minDisplayMasteringLuminance =
            info_frame->min_display_mastering_luminance;
        req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxCLL =
            info_frame->max_cll;
        req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxFALL =
            info_frame->max_fall;

        req_config->modeSetConfig.hdrInfoFrame.eotf = info_frame->eotf;

        req_config->modeSetConfig.hdrInfoFrame.enabled = NV_TRUE;
    } else {
        req_config->modeSetConfig.hdrInfoFrame.enabled = NV_FALSE;
    }

    req_config->flags.colorimetryChanged =
        (old_connector_state->colorspace != new_connector_state->colorspace);
    // When adding a case here, also add to __nv_drm_connector_supported_colorspaces
    switch (new_connector_state->colorspace) {
        case DRM_MODE_COLORIMETRY_DEFAULT:
            req_config->modeSetConfig.colorimetry =
                NVKMS_OUTPUT_COLORIMETRY_DEFAULT;
            break;
        case DRM_MODE_COLORIMETRY_BT2020_RGB:
        case DRM_MODE_COLORIMETRY_BT2020_YCC:
            // Ignore RGB/YCC
            // See https://patchwork.freedesktop.org/patch/525496/?series=111865&rev=4
            req_config->modeSetConfig.colorimetry =
                NVKMS_OUTPUT_COLORIMETRY_BT2100;
            break;
        default:
            // XXX HDR TODO: Add support for more color spaces
            NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported color space");
            return -EINVAL;
    }

    return 0;
}
#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */

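Editor's note: the atomic check above consumes the standard HDR_OUTPUT_METADATA connector property. A minimal userspace sketch of how such a blob is produced and attached (assumes libdrm; fd is a DRM master fd, prop_id was resolved by name from drmModeObjectGetProperties(), the luminance values are illustrative, and the drm_mode.h include path varies by distro):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <linux/hdmi.h>     /* HDMI_EOTF_SMPTE_ST2084, HDMI_STATIC_METADATA_TYPE1 */
#include <drm/drm_mode.h>   /* struct hdr_output_metadata */

static int set_hdr_metadata(int fd, uint32_t conn_id, uint32_t prop_id)
{
    struct hdr_output_metadata meta;
    uint32_t blob_id;
    int ret;

    memset(&meta, 0, sizeof(meta));
    meta.metadata_type = HDMI_STATIC_METADATA_TYPE1;
    meta.hdmi_metadata_type1.metadata_type = HDMI_STATIC_METADATA_TYPE1;
    meta.hdmi_metadata_type1.eotf = HDMI_EOTF_SMPTE_ST2084;          /* PQ */
    meta.hdmi_metadata_type1.max_display_mastering_luminance = 1000; /* nits */
    meta.hdmi_metadata_type1.max_cll = 1000;
    meta.hdmi_metadata_type1.max_fall = 400;

    ret = drmModeCreatePropertyBlob(fd, &meta, sizeof(meta), &blob_id);
    if (ret != 0) {
        return ret;
    }

    /* Takes effect on the next atomic commit, where the driver's
     * __nv_drm_connector_atomic_check() above picks it up. */
    return drmModeObjectSetProperty(fd, conn_id, DRM_MODE_OBJECT_CONNECTOR,
                                    prop_id, blob_id);
}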
static const struct drm_connector_helper_funcs nv_connector_helper_funcs = {
    .get_modes = nv_drm_connector_get_modes,
    .mode_valid = nv_drm_connector_mode_valid,
    .best_encoder = nv_drm_connector_best_encoder,
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
    .atomic_check = __nv_drm_connector_atomic_check,
#endif
};

static struct drm_connector*
@@ -367,6 +493,8 @@ nv_drm_connector_new(struct drm_device *dev,
    nv_connector->physicalIndex = physicalIndex;
    nv_connector->type = type;
    nv_connector->internal = internal;
    nv_connector->modeset_permission_filep = NULL;
    nv_connector->modeset_permission_crtc = NULL;

    strcpy(nv_connector->dpAddress, dpAddress);

@@ -392,6 +520,32 @@ nv_drm_connector_new(struct drm_device *dev,
            DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
    }

#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
    if (nv_connector->type == NVKMS_CONNECTOR_TYPE_HDMI) {
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
        if (drm_mode_create_hdmi_colorspace_property(
                &nv_connector->base,
                __nv_drm_connector_supported_colorspaces) == 0) {
#else
        if (drm_mode_create_hdmi_colorspace_property(&nv_connector->base) == 0) {
#endif
            drm_connector_attach_colorspace_property(&nv_connector->base);
        }
        drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
    } else if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DP) {
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
        if (drm_mode_create_dp_colorspace_property(
                &nv_connector->base,
                __nv_drm_connector_supported_colorspaces) == 0) {
#else
        if (drm_mode_create_dp_colorspace_property(&nv_connector->base) == 0) {
#endif
            drm_connector_attach_colorspace_property(&nv_connector->base);
        }
        drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
    }
#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */

    /* Register connector with DRM subsystem */

    ret = drm_connector_register(&nv_connector->base);
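Editor's sketch (not part of the diff): selecting BT.2020 output colorimetry through the "Colorspace" enum property attached above. Assumes libdrm; set_connector_enum() is a hypothetical helper that resolves the enum entry by name:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Look up an enum entry of a connector property by name and apply it. */
static int set_connector_enum(int fd, uint32_t conn_id,
                              const char *prop_name, const char *enum_name)
{
    drmModeObjectProperties *props =
        drmModeObjectGetProperties(fd, conn_id, DRM_MODE_OBJECT_CONNECTOR);
    int ret = -1;

    for (uint32_t i = 0; props && i < props->count_props; i++) {
        drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);
        if (!prop) {
            continue;
        }
        if (strcmp(prop->name, prop_name) == 0) {
            for (int j = 0; j < prop->count_enums; j++) {
                if (strcmp(prop->enums[j].name, enum_name) == 0) {
                    ret = drmModeObjectSetProperty(fd, conn_id,
                                                   DRM_MODE_OBJECT_CONNECTOR,
                                                   prop->prop_id,
                                                   prop->enums[j].value);
                    break;
                }
            }
        }
        drmModeFreeProperty(prop);
    }
    drmModeFreeObjectProperties(props);
    return ret;
}

/* usage, e.g.: set_connector_enum(fd, conn_id, "Colorspace", "BT2020_RGB"); */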
@@ -469,4 +623,26 @@ done:
    return connector;
}

/*
 * Revoke the permissions on this connector.
 */
bool nv_drm_connector_revoke_permissions(struct drm_device *dev,
                                         struct nv_drm_connector* nv_connector)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    bool ret = true;

    if (nv_connector->modeset_permission_crtc) {
        if (nv_connector->nv_detected_encoder) {
            ret = nvKms->revokePermissions(
                nv_dev->pDevice, nv_connector->modeset_permission_crtc->head,
                nv_connector->nv_detected_encoder->hDisplay);
        }
        nv_connector->modeset_permission_crtc->modeset_permission_filep = NULL;
        nv_connector->modeset_permission_crtc = NULL;
    }
    nv_connector->modeset_permission_filep = NULL;
    return ret;
}

#endif

@@ -51,6 +51,20 @@ struct nv_drm_connector {

    atomic_t connection_status_dirty;

    /**
     * @modeset_permission_filep:
     *
     * The filep using this connector with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS.
     */
    struct drm_file *modeset_permission_filep;

    /**
     * @modeset_permission_crtc:
     *
     * The crtc using this connector with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS.
     */
    struct nv_drm_crtc *modeset_permission_crtc;

    struct drm_connector base;
};

@@ -84,6 +98,9 @@ nv_drm_get_connector(struct drm_device *dev,
                     NvBool internal,
                     char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]);

bool nv_drm_connector_revoke_permissions(struct drm_device *dev,
                                         struct nv_drm_connector *nv_connector);

#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

#endif /* __NVIDIA_DRM_CONNECTOR_H__ */

@@ -44,8 +44,15 @@

#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
#include <linux/nvhost.h>
#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
#include <linux/host1x-next.h>
#endif

#if defined(NV_DRM_DRM_COLOR_MGMT_H_PRESENT)
#include <drm/drm_color_mgmt.h>
#endif

#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
static int
nv_drm_atomic_replace_property_blob_from_id(struct drm_device *dev,

@@ -85,11 +92,22 @@ static void nv_drm_plane_destroy(struct drm_plane *plane)
    nv_drm_free(nv_plane);
}

static inline void
plane_config_clear(struct NvKmsKapiLayerConfig *layerConfig)
{
    if (layerConfig == NULL) {
        return;
    }

    memset(layerConfig, 0, sizeof(*layerConfig));
    layerConfig->csc = NVKMS_IDENTITY_CSC_MATRIX;
}

static inline void
plane_req_config_disable(struct NvKmsKapiLayerRequestedConfig *req_config)
{
    /* Clear layer config */
    memset(&req_config->config, 0, sizeof(req_config->config));
    plane_config_clear(&req_config->config);

    /* Set flags to get cleared layer config applied */
    req_config->flags.surfaceChanged = NV_TRUE;

@@ -106,6 +124,45 @@ cursor_req_config_disable(struct NvKmsKapiCursorRequestedConfig *req_config)
    req_config->flags.surfaceChanged = NV_TRUE;
}

#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
static void color_mgmt_config_ctm_to_csc(struct NvKmsCscMatrix *nvkms_csc,
                                         struct drm_color_ctm *drm_ctm)
{
    int y;

    /* CTM is a 3x3 matrix while ours is 3x4. Zero out the last column. */
    nvkms_csc->m[0][3] = nvkms_csc->m[1][3] = nvkms_csc->m[2][3] = 0;

    for (y = 0; y < 3; y++) {
        int x;

        for (x = 0; x < 3; x++) {
            /*
             * Values in the CTM are encoded in S31.32 sign-magnitude fixed-
             * point format, while NvKms CSC values are signed 2's-complement
             * S15.16 (Ssign-extend12-3.16?) fixed-point format.
             */
            NvU64 ctmVal = drm_ctm->matrix[y*3 + x];
            NvU64 signBit = ctmVal & (1ULL << 63);
            NvU64 magnitude = ctmVal & ~signBit;

            /*
             * Drop the low 16 bits of the fractional part and the high 17 bits
             * of the integral part. Drop 17 bits to avoid corner cases where
             * the highest resulting bit is a 1, causing the `cscVal = -cscVal`
             * line to result in a positive number.
             */
            NvS32 cscVal = (magnitude >> 16) & ((1ULL << 31) - 1);
            if (signBit) {
                cscVal = -cscVal;
            }

            nvkms_csc->m[y][x] = cscVal;
        }
    }
}
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */

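Editor's note: a standalone illustration of the S31.32 sign-magnitude to S15.16 two's-complement squeeze performed by color_mgmt_config_ctm_to_csc() above. This compiles and runs with any C compiler; no kernel or driver headers are involved.

#include <stdint.h>
#include <stdio.h>

static int32_t ctm_to_s15_16(uint64_t ctm_val)
{
    uint64_t sign = ctm_val & (1ULL << 63);   /* sign-magnitude sign bit */
    uint64_t mag  = ctm_val & ~sign;
    /* keep 31 magnitude bits: 15 integer + 16 fraction, as in the driver */
    int32_t out = (int32_t)((mag >> 16) & ((1ULL << 31) - 1));
    return sign ? -out : out;
}

int main(void)
{
    /* 1.5 in S31.32 is 0x1_8000_0000; -0.25 sets the sign bit on 0x4000_0000 */
    printf("%f\n", ctm_to_s15_16(0x180000000ULL) / 65536.0);               /* 1.500000 */
    printf("%f\n", ctm_to_s15_16((1ULL << 63) | 0x40000000ULL) / 65536.0); /* -0.250000 */
    return 0;
}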
static void
cursor_plane_req_config_update(struct drm_plane *plane,
                               struct drm_plane_state *plane_state,

@@ -232,6 +289,8 @@ plane_req_config_update(struct drm_plane *plane,
            .dstY = plane_state->crtc_y,
            .dstWidth = plane_state->crtc_w,
            .dstHeight = plane_state->crtc_h,

            .csc = old_config.csc
        },
    };

@@ -361,6 +420,21 @@ plane_req_config_update(struct drm_plane *plane,

    if (nv_drm_plane_state->fd_user_ptr) {
        req_config->config.syncptParams.postSyncptRequested = true;
    }
#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
    if (plane_state->fence != NULL) {
        int ret = host1x_fence_extract(
            plane_state->fence,
            &req_config->config.syncptParams.preSyncptId,
            &req_config->config.syncptParams.preSyncptValue);
        if (ret != 0) {
            return ret;
        }
        req_config->config.syncptParams.preSyncptSpecified = true;
    }

    if (nv_drm_plane_state->fd_user_ptr) {
        req_config->config.syncptParams.postSyncptRequested = true;
    }
#else
    return -1;
@@ -382,27 +456,25 @@ plane_req_config_update(struct drm_plane *plane,
        }

        for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i ++) {
            req_config->config.hdrMetadata.displayPrimaries[i].x =
            req_config->config.hdrMetadata.val.displayPrimaries[i].x =
                info_frame->display_primaries[i].x;
            req_config->config.hdrMetadata.displayPrimaries[i].y =
            req_config->config.hdrMetadata.val.displayPrimaries[i].y =
                info_frame->display_primaries[i].y;
        }

        req_config->config.hdrMetadata.whitePoint.x =
        req_config->config.hdrMetadata.val.whitePoint.x =
            info_frame->white_point.x;
        req_config->config.hdrMetadata.whitePoint.y =
        req_config->config.hdrMetadata.val.whitePoint.y =
            info_frame->white_point.y;
        req_config->config.hdrMetadata.maxDisplayMasteringLuminance =
        req_config->config.hdrMetadata.val.maxDisplayMasteringLuminance =
            info_frame->max_display_mastering_luminance;
        req_config->config.hdrMetadata.minDisplayMasteringLuminance =
        req_config->config.hdrMetadata.val.minDisplayMasteringLuminance =
            info_frame->min_display_mastering_luminance;
        req_config->config.hdrMetadata.maxCLL =
        req_config->config.hdrMetadata.val.maxCLL =
            info_frame->max_cll;
        req_config->config.hdrMetadata.maxFALL =
        req_config->config.hdrMetadata.val.maxFALL =
            info_frame->max_fall;

        req_config->config.hdrMetadataSpecified = true;

        switch (info_frame->eotf) {
            case HDMI_EOTF_SMPTE_ST2084:
                req_config->config.tf = NVKMS_OUTPUT_TF_PQ;

@@ -415,10 +487,21 @@ plane_req_config_update(struct drm_plane *plane,
                NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported EOTF");
                return -1;
        }

        req_config->config.hdrMetadata.enabled = true;
    } else {
        req_config->config.hdrMetadataSpecified = false;
        req_config->config.hdrMetadata.enabled = false;
        req_config->config.tf = NVKMS_OUTPUT_TF_NONE;
    }

    req_config->flags.hdrMetadataChanged =
        ((old_config.hdrMetadata.enabled !=
          req_config->config.hdrMetadata.enabled) ||
         memcmp(&old_config.hdrMetadata.val,
                &req_config->config.hdrMetadata.val,
                sizeof(struct NvKmsHDRStaticMetadata)));

    req_config->flags.tfChanged = (old_config.tf != req_config->config.tf);
#endif

    /*

@@ -547,6 +630,24 @@ static int nv_drm_plane_atomic_check(struct drm_plane *plane,
            return ret;
        }

#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
        if (crtc_state->color_mgmt_changed) {
            /*
             * According to the comment in the Linux kernel's
             * drivers/gpu/drm/drm_color_mgmt.c, if this property is NULL,
             * the CTM needs to be changed to the identity matrix
             */
            if (crtc_state->ctm) {
                color_mgmt_config_ctm_to_csc(&plane_requested_config->config.csc,
                                             (struct drm_color_ctm *)crtc_state->ctm->data);
            } else {
                plane_requested_config->config.csc = NVKMS_IDENTITY_CSC_MATRIX;
            }
            plane_requested_config->config.cscUseMain = NV_FALSE;
            plane_requested_config->flags.cscChanged = NV_TRUE;
        }
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */

        if (__is_async_flip_requested(plane, crtc_state)) {
            /*
             * Async flip requests that the flip happen 'as soon as

@@ -637,6 +738,38 @@ static int nv_drm_plane_atomic_get_property(
    return -EINVAL;
}

/**
 * nv_drm_plane_atomic_reset - plane state reset hook
 * @plane: DRM plane
 *
 * Allocate an empty DRM plane state.
 */
static void nv_drm_plane_atomic_reset(struct drm_plane *plane)
{
    struct nv_drm_plane_state *nv_plane_state =
        nv_drm_calloc(1, sizeof(*nv_plane_state));

    if (!nv_plane_state) {
        return;
    }

    drm_atomic_helper_plane_reset(plane);

    /*
     * The drm atomic helper function allocates a state object that is the wrong
     * size. Copy its contents into the one we allocated above and replace the
     * pointer.
     */
    if (plane->state) {
        nv_plane_state->base = *plane->state;
        kfree(plane->state);
        plane->state = &nv_plane_state->base;
    } else {
        kfree(nv_plane_state);
    }
}

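Editor's note: the reset hook above is an instance of a general pattern for DRM drivers that subclass atomic state: drm_atomic_helper_plane_reset() kzalloc()s a bare struct drm_plane_state, which is too small for the subclass, so the helper-built object is migrated into a correctly sized allocation and the pointer swapped. A generic sketch of the same idea (hypothetical foo_* names; assumes the usual container_of-based downcast):

struct foo_plane_state {
    struct drm_plane_state base;
    int foo_private;                  /* driver-specific extras */
};

static inline struct foo_plane_state *
to_foo_plane_state(struct drm_plane_state *s)
{
    return container_of(s, struct foo_plane_state, base);
}

static void foo_plane_reset(struct drm_plane *plane)
{
    struct foo_plane_state *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

    if (!foo)
        return;

    drm_atomic_helper_plane_reset(plane);  /* builds a default base state */

    if (plane->state) {
        foo->base = *plane->state;         /* migrate into the larger object */
        kfree(plane->state);
        plane->state = &foo->base;
    } else {
        kfree(foo);
    }
}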
static struct drm_plane_state *
nv_drm_plane_atomic_duplicate_state(struct drm_plane *plane)
{

@@ -675,9 +808,11 @@ static inline void __nv_drm_plane_atomic_destroy_state(
#endif

#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
    struct nv_drm_plane_state *nv_drm_plane_state =
        to_nv_drm_plane_state(state);
    drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata);
    {
        struct nv_drm_plane_state *nv_drm_plane_state =
            to_nv_drm_plane_state(state);
        drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata);
    }
#endif
}

@@ -694,7 +829,7 @@ static const struct drm_plane_funcs nv_plane_funcs = {
    .update_plane = drm_atomic_helper_update_plane,
    .disable_plane = drm_atomic_helper_disable_plane,
    .destroy = nv_drm_plane_destroy,
    .reset = drm_atomic_helper_plane_reset,
    .reset = nv_drm_plane_atomic_reset,
    .atomic_get_property = nv_drm_plane_atomic_get_property,
    .atomic_set_property = nv_drm_plane_atomic_set_property,
    .atomic_duplicate_state = nv_drm_plane_atomic_duplicate_state,

@@ -751,6 +886,52 @@ static inline void nv_drm_crtc_duplicate_req_head_modeset_config(
    }
}

static inline struct nv_drm_crtc_state *nv_drm_crtc_state_alloc(void)
{
    struct nv_drm_crtc_state *nv_state = nv_drm_calloc(1, sizeof(*nv_state));
    int i;

    if (nv_state == NULL) {
        return NULL;
    }

    for (i = 0; i < ARRAY_SIZE(nv_state->req_config.layerRequestedConfig); i++) {
        plane_config_clear(&nv_state->req_config.layerRequestedConfig[i].config);
    }
    return nv_state;
}

/**
 * nv_drm_atomic_crtc_reset - crtc state reset hook
 * @crtc: DRM crtc
 *
 * Allocate an empty DRM crtc state.
 */
static void nv_drm_atomic_crtc_reset(struct drm_crtc *crtc)
{
    struct nv_drm_crtc_state *nv_state = nv_drm_crtc_state_alloc();

    if (!nv_state) {
        return;
    }

    drm_atomic_helper_crtc_reset(crtc);

    /*
     * The drm atomic helper function allocates a state object that is the wrong
     * size. Copy its contents into the one we allocated above and replace the
     * pointer.
     */
    if (crtc->state) {
        nv_state->base = *crtc->state;
        kfree(crtc->state);
        crtc->state = &nv_state->base;
    } else {
        kfree(nv_state);
    }
}

/**
 * nv_drm_atomic_crtc_duplicate_state - crtc state duplicate hook
 * @crtc: DRM crtc

@@ -762,7 +943,7 @@ static inline void nv_drm_crtc_duplicate_req_head_modeset_config(
static struct drm_crtc_state*
nv_drm_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
{
    struct nv_drm_crtc_state *nv_state = nv_drm_calloc(1, sizeof(*nv_state));
    struct nv_drm_crtc_state *nv_state = nv_drm_crtc_state_alloc();

    if (nv_state == NULL) {
        return NULL;

@@ -783,6 +964,9 @@ nv_drm_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
        &(to_nv_crtc_state(crtc->state)->req_config),
        &nv_state->req_config);

    nv_state->ilut_ramps = NULL;
    nv_state->olut_ramps = NULL;

    return &nv_state->base;
}

@@ -806,16 +990,22 @@ static void nv_drm_atomic_crtc_destroy_state(struct drm_crtc *crtc,

    __nv_drm_atomic_helper_crtc_destroy_state(crtc, &nv_state->base);

    nv_drm_free(nv_state->ilut_ramps);
    nv_drm_free(nv_state->olut_ramps);

    nv_drm_free(nv_state);
}

static struct drm_crtc_funcs nv_crtc_funcs = {
    .set_config = drm_atomic_helper_set_config,
    .page_flip = drm_atomic_helper_page_flip,
    .reset = drm_atomic_helper_crtc_reset,
    .reset = nv_drm_atomic_crtc_reset,
    .destroy = nv_drm_crtc_destroy,
    .atomic_duplicate_state = nv_drm_atomic_crtc_duplicate_state,
    .atomic_destroy_state = nv_drm_atomic_crtc_destroy_state,
#if defined(NV_DRM_ATOMIC_HELPER_LEGACY_GAMMA_SET_PRESENT)
    .gamma_set = drm_atomic_helper_legacy_gamma_set,
#endif
};

/*

@@ -849,6 +1039,132 @@ static int head_modeset_config_attach_connector(
    return 0;
}

#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
static int color_mgmt_config_copy_lut(struct NvKmsLutRamps *nvkms_lut,
                                      struct drm_color_lut *drm_lut,
                                      uint64_t lut_len)
{
    uint64_t i = 0;
    if (lut_len != NVKMS_LUT_ARRAY_SIZE) {
        return -EINVAL;
    }

    /*
     * Both NvKms and drm LUT values are 16-bit linear values. NvKms LUT ramps
     * are in arrays in a single struct while drm LUT ramps are an array of
     * structs.
     */
    for (i = 0; i < lut_len; i++) {
        nvkms_lut->red[i] = drm_lut[i].red;
        nvkms_lut->green[i] = drm_lut[i].green;
        nvkms_lut->blue[i] = drm_lut[i].blue;
    }
    return 0;
}

static int color_mgmt_config_set_luts(struct nv_drm_crtc_state *nv_crtc_state,
                                      struct NvKmsKapiHeadRequestedConfig *req_config)
{
    struct NvKmsKapiHeadModeSetConfig *modeset_config =
        &req_config->modeSetConfig;
    struct drm_crtc_state *crtc_state = &nv_crtc_state->base;
    int ret = 0;

    /*
     * According to the comment in the Linux kernel's
     * drivers/gpu/drm/drm_color_mgmt.c, if either property is NULL, that LUT
     * needs to be changed to a linear LUT
     */

    req_config->flags.lutChanged = NV_TRUE;
    if (crtc_state->degamma_lut) {
        struct drm_color_lut *degamma_lut = NULL;
        uint64_t degamma_len = 0;

        nv_crtc_state->ilut_ramps = nv_drm_calloc(1, sizeof(*nv_crtc_state->ilut_ramps));
        if (!nv_crtc_state->ilut_ramps) {
            ret = -ENOMEM;
            goto fail;
        }

        degamma_lut = (struct drm_color_lut *)crtc_state->degamma_lut->data;
        degamma_len = crtc_state->degamma_lut->length /
                      sizeof(struct drm_color_lut);

        if ((ret = color_mgmt_config_copy_lut(nv_crtc_state->ilut_ramps,
                                              degamma_lut,
                                              degamma_len)) != 0) {
            goto fail;
        }

        modeset_config->lut.input.specified = NV_TRUE;
        modeset_config->lut.input.depth = 30; /* specify the full LUT */
        modeset_config->lut.input.start = 0;
        modeset_config->lut.input.end = degamma_len - 1;
        modeset_config->lut.input.pRamps = nv_crtc_state->ilut_ramps;
    } else {
        /* setting input.end to 0 is equivalent to disabling the LUT, which
         * should be equivalent to a linear LUT */
        modeset_config->lut.input.specified = NV_TRUE;
        modeset_config->lut.input.depth = 30; /* specify the full LUT */
        modeset_config->lut.input.start = 0;
        modeset_config->lut.input.end = 0;
        modeset_config->lut.input.pRamps = NULL;
    }

    if (crtc_state->gamma_lut) {
        struct drm_color_lut *gamma_lut = NULL;
        uint64_t gamma_len = 0;

        nv_crtc_state->olut_ramps = nv_drm_calloc(1, sizeof(*nv_crtc_state->olut_ramps));
        if (!nv_crtc_state->olut_ramps) {
            ret = -ENOMEM;
            goto fail;
        }

        gamma_lut = (struct drm_color_lut *)crtc_state->gamma_lut->data;
        gamma_len = crtc_state->gamma_lut->length /
                    sizeof(struct drm_color_lut);

        if ((ret = color_mgmt_config_copy_lut(nv_crtc_state->olut_ramps,
                                              gamma_lut,
                                              gamma_len)) != 0) {
            goto fail;
        }

        modeset_config->lut.output.specified = NV_TRUE;
        modeset_config->lut.output.enabled = NV_TRUE;
        modeset_config->lut.output.pRamps = nv_crtc_state->olut_ramps;
    } else {
        /* disabling the output LUT should be equivalent to setting a linear
         * LUT */
        modeset_config->lut.output.specified = NV_TRUE;
        modeset_config->lut.output.enabled = NV_FALSE;
        modeset_config->lut.output.pRamps = NULL;
    }

    return 0;

fail:
    /* free allocated state */
    nv_drm_free(nv_crtc_state->ilut_ramps);
    nv_drm_free(nv_crtc_state->olut_ramps);

    /* remove dangling pointers */
    nv_crtc_state->ilut_ramps = NULL;
    nv_crtc_state->olut_ramps = NULL;
    modeset_config->lut.input.pRamps = NULL;
    modeset_config->lut.output.pRamps = NULL;

    /* prevent attempts at reading NULLs */
    modeset_config->lut.input.specified = NV_FALSE;
    modeset_config->lut.output.specified = NV_FALSE;

    return ret;
}
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */

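Editor's sketch (not part of the diff): how the DEGAMMA_LUT/GAMMA_LUT path above is fed from userspace. Assumes libdrm; crtc_id and the GAMMA_LUT property id are assumed to have been looked up already, and the entry count must match what the CRTC's GAMMA_LUT_SIZE property reports (hard-coded here for brevity):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

#define LUT_SIZE 1024   /* illustrative; query GAMMA_LUT_SIZE for the real value */

static int set_identity_gamma(int fd, uint32_t crtc_id, uint32_t gamma_lut_prop)
{
    struct drm_color_lut lut[LUT_SIZE];
    uint32_t blob_id;
    int ret;

    for (int i = 0; i < LUT_SIZE; i++) {
        /* 16-bit linear ramp, matching color_mgmt_config_copy_lut() above */
        uint16_t v = (uint16_t)(((uint32_t)i * 0xffff) / (LUT_SIZE - 1));
        lut[i].red = lut[i].green = lut[i].blue = v;
        lut[i].reserved = 0;
    }

    ret = drmModeCreatePropertyBlob(fd, lut, sizeof(lut), &blob_id);
    if (ret != 0) {
        return ret;
    }

    return drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
                                    gamma_lut_prop, blob_id);
}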
/**
 * nv_drm_crtc_atomic_check() can fail after it has modified
 * the 'nv_drm_crtc_state::req_config', that is fine because 'nv_drm_crtc_state'

@@ -870,6 +1186,9 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
    struct NvKmsKapiHeadRequestedConfig *req_config =
        &nv_crtc_state->req_config;
    int ret = 0;
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
    struct nv_drm_device *nv_dev = to_nv_device(crtc_state->crtc->dev);
#endif

    if (crtc_state->mode_changed) {
        drm_mode_to_nvkms_display_mode(&crtc_state->mode,

@@ -908,6 +1227,25 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
        req_config->flags.activeChanged = NV_TRUE;
    }

#if defined(NV_DRM_CRTC_STATE_HAS_VRR_ENABLED)
    req_config->modeSetConfig.vrrEnabled = crtc_state->vrr_enabled;
#endif

#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
    if (nv_dev->drmMasterChangedSinceLastAtomicCommit &&
        (crtc_state->degamma_lut ||
         crtc_state->ctm ||
         crtc_state->gamma_lut)) {

        crtc_state->color_mgmt_changed = NV_TRUE;
    }
    if (crtc_state->color_mgmt_changed) {
        if ((ret = color_mgmt_config_set_luts(nv_crtc_state, req_config)) != 0) {
            return ret;
        }
    }
#endif

    return ret;
}

@@ -1139,6 +1477,8 @@ nv_drm_plane_create(struct drm_device *dev,
                        plane,
                        validLayerRRTransforms);

    nv_drm_free(formats);

    return plane;

failed_plane_init:

@@ -1170,7 +1510,7 @@ static struct drm_crtc *__nv_drm_crtc_create(struct nv_drm_device *nv_dev,
        goto failed;
    }

    nv_state = nv_drm_calloc(1, sizeof(*nv_state));
    nv_state = nv_drm_crtc_state_alloc();
    if (nv_state == NULL) {
        goto failed_state_alloc;
    }

@@ -1181,6 +1521,7 @@ static struct drm_crtc *__nv_drm_crtc_create(struct nv_drm_device *nv_dev,
    nv_crtc->head = head;
    INIT_LIST_HEAD(&nv_crtc->flip_list);
    spin_lock_init(&nv_crtc->flip_list_lock);
    nv_crtc->modeset_permission_filep = NULL;

    ret = drm_crtc_init_with_planes(nv_dev->dev,
                                    &nv_crtc->base,

@@ -1202,6 +1543,22 @@ static struct drm_crtc *__nv_drm_crtc_create(struct nv_drm_device *nv_dev,

    drm_crtc_helper_add(&nv_crtc->base, &nv_crtc_helper_funcs);

#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
#if defined(NV_DRM_CRTC_ENABLE_COLOR_MGMT_PRESENT)
    drm_crtc_enable_color_mgmt(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE, true,
                               NVKMS_LUT_ARRAY_SIZE);
#else
    drm_helper_crtc_enable_color_mgmt(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE,
                                      NVKMS_LUT_ARRAY_SIZE);
#endif
    ret = drm_mode_crtc_set_gamma_size(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE);
    if (ret != 0) {
        NV_DRM_DEV_LOG_WARN(
            nv_dev,
            "Failed to initialize legacy gamma support for head %u", head);
    }
#endif

    return &nv_crtc->base;

failed_init_crtc:

@@ -1310,10 +1667,16 @@ static void NvKmsKapiCrcsToDrm(const struct NvKmsKapiCrcs *crcs,
{
    drmCrcs->outputCrc32.value = crcs->outputCrc32.value;
    drmCrcs->outputCrc32.supported = crcs->outputCrc32.supported;
    drmCrcs->outputCrc32.__pad0 = 0;
    drmCrcs->outputCrc32.__pad1 = 0;
    drmCrcs->rasterGeneratorCrc32.value = crcs->rasterGeneratorCrc32.value;
    drmCrcs->rasterGeneratorCrc32.supported = crcs->rasterGeneratorCrc32.supported;
    drmCrcs->rasterGeneratorCrc32.__pad0 = 0;
    drmCrcs->rasterGeneratorCrc32.__pad1 = 0;
    drmCrcs->compositorCrc32.value = crcs->compositorCrc32.value;
    drmCrcs->compositorCrc32.supported = crcs->compositorCrc32.supported;
    drmCrcs->compositorCrc32.__pad0 = 0;
    drmCrcs->compositorCrc32.__pad1 = 0;
}

int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,

@@ -1326,10 +1689,10 @@ int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,
    struct NvKmsKapiCrcs crc32;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return -ENOENT;
        return -EOPNOTSUPP;
    }

    crtc = nv_drm_crtc_find(dev, params->crtc_id);
    crtc = nv_drm_crtc_find(dev, filep, params->crtc_id);
    if (!crtc) {
        return -ENOENT;
    }

@@ -1354,10 +1717,10 @@ int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev,
    struct NvKmsKapiCrcs crc32;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return -ENOENT;
        return -EOPNOTSUPP;
    }

    crtc = nv_drm_crtc_find(dev, params->crtc_id);
    crtc = nv_drm_crtc_find(dev, filep, params->crtc_id);
    if (!crtc) {
        return -ENOENT;
    }

@@ -35,38 +35,9 @@

#include <drm/drm_crtc.h>

#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) || defined(NV_DRM_ROTATION_AVAILABLE)
/* For DRM_ROTATE_* , DRM_REFLECT_* */
#include <drm/drm_blend.h>
#endif

#if defined(NV_DRM_ROTATION_AVAILABLE)
/* For DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* */
#include <uapi/drm/drm_mode.h>
#endif

#include "nvtypes.h"
#include "nvkms-kapi.h"

#if defined(NV_DRM_ROTATION_AVAILABLE)
/*
 * 19-05-2017 c2c446ad29437bb92b157423c632286608ebd3ec has added
 * DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* to UAPI and removed
 * DRM_ROTATE_* and DRM_REFLECT_*
 */
#if !defined(DRM_MODE_ROTATE_0)
#define DRM_MODE_ROTATE_0 DRM_ROTATE_0
#define DRM_MODE_ROTATE_90 DRM_ROTATE_90
#define DRM_MODE_ROTATE_180 DRM_ROTATE_180
#define DRM_MODE_ROTATE_270 DRM_ROTATE_270
#define DRM_MODE_REFLECT_X DRM_REFLECT_X
#define DRM_MODE_REFLECT_Y DRM_REFLECT_Y
#define DRM_MODE_ROTATE_MASK DRM_ROTATE_MASK
#define DRM_MODE_REFLECT_MASK DRM_REFLECT_MASK
#endif

#endif //NV_DRM_ROTATION_AVAILABLE

struct nv_drm_crtc {
    NvU32 head;

@@ -85,6 +56,13 @@ struct nv_drm_crtc {
     */
    spinlock_t flip_list_lock;

    /**
     * @modeset_permission_filep:
     *
     * The filep using this crtc with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS.
     */
    struct drm_file *modeset_permission_filep;

    struct drm_crtc base;
};

@@ -151,6 +129,9 @@ struct nv_drm_crtc_state {
     */
    struct NvKmsKapiHeadRequestedConfig req_config;

    struct NvKmsLutRamps *ilut_ramps;
    struct NvKmsLutRamps *olut_ramps;

    /**
     * @nv_flip:
     *
[File diff suppressed because it is too large]
@@ -31,6 +31,12 @@ int nv_drm_probe_devices(void);

void nv_drm_remove_devices(void);

void nv_drm_suspend_resume(NvBool suspend);

void nv_drm_register_drm_device(const nv_gpu_info_t *);

void nv_drm_update_drm_driver_features(void);

#endif /* defined(NV_DRM_AVAILABLE) */

#endif /* __NVIDIA_DRM_DRV_H__ */

@@ -205,7 +205,7 @@ nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay)
    encoder = nv_drm_encoder_new(dev,
                                 displayInfo->handle,
                                 connectorInfo->signalFormat,
                                 get_crtc_mask(dev, connectorInfo->headMask));
                                 get_crtc_mask(dev, displayInfo->headMask));

    if (IS_ERR(encoder)) {
        ret = PTR_ERR(encoder);

@@ -300,7 +300,7 @@ void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,

    nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);

    drm_kms_helper_hotplug_event(dev);
    schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
}

void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,

@@ -347,6 +347,6 @@ void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
    drm_reinit_primary_mode_group(dev);
#endif

    drm_kms_helper_hotplug_event(dev);
    schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
}
#endif

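Editor's note: the two hunks above defer the hotplug notification to a workqueue instead of calling drm_kms_helper_hotplug_event() in the event callback itself. A minimal sketch of the pattern (the real handler and the hotplug_event_work field live elsewhere in this series; this assumes hotplug_event_work is a struct delayed_work member of struct nv_drm_device):

#include <linux/workqueue.h>

static void hotplug_event_work_fn(struct work_struct *work)
{
    struct nv_drm_device *nv_dev =
        container_of(to_delayed_work(work), struct nv_drm_device,
                     hotplug_event_work);

    /* Runs in process context, outside the NVKMS event callback, so it is
     * safe to take the DRM locks that the hotplug helper needs. */
    drm_kms_helper_hotplug_event(nv_dev->dev);
}

/* setup, e.g. at device init:
 *     INIT_DELAYED_WORK(&nv_dev->hotplug_event_work, hotplug_event_work_fn);
 * trigger, as in the hunks above:
 *     schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
 */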
@@ -150,6 +150,14 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,

    for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) {
        if (nv_fb->nv_gem[i] != NULL) {
            if (!nvKms->isMemoryValidForDisplay(nv_dev->pDevice,
                                                nv_fb->nv_gem[i]->pMemory)) {
                NV_DRM_DEV_LOG_INFO(
                    nv_dev,
                    "Framebuffer memory not appropriate for scanout");
                goto fail;
            }

            params.planes[i].memory = nv_fb->nv_gem[i]->pMemory;
            params.planes[i].offset = nv_fb->base.offsets[i];
            params.planes[i].pitch = nv_fb->base.pitches[i];

@@ -164,6 +172,17 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
        params.layout = (modifier & 0x10) ?
            NvKmsSurfaceMemoryLayoutBlockLinear :
            NvKmsSurfaceMemoryLayoutPitch;

        // See definition of DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D, we are testing
        // 'c', the lossless compression field of the modifier
        if (params.layout == NvKmsSurfaceMemoryLayoutBlockLinear &&
            (modifier >> 23) & 0x7) {
            NV_DRM_DEV_LOG_ERR(
                nv_dev,
                "Cannot create FB from compressible surface allocation");
            goto fail;
        }

        params.log2GobsPerBlockY = modifier & 0xf;
    } else {
        params.explicit_layout = false;
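Editor's note: the field tests above ('modifier & 0x10', '(modifier >> 23) & 0x7', 'modifier & 0xf') pick apart DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D. The same decoding as a standalone sketch; bit positions are taken from the checks in this hunk, and drm_fourcc.h remains the authoritative layout:

#include <stdbool.h>
#include <stdint.h>

struct nv_modifier_fields {
    bool     block_linear;           /* bit 4: block-linear vs. pitch layout */
    uint32_t log2_gobs_per_block_y;  /* bits 0..3 */
    uint32_t compression;            /* bits 23..25 ('c'); nonzero = compressible */
};

static struct nv_modifier_fields decode_nv_modifier(uint64_t modifier)
{
    struct nv_modifier_fields f;

    f.block_linear = (modifier & 0x10) != 0;
    f.log2_gobs_per_block_y = (uint32_t)(modifier & 0xf);
    f.compression = (uint32_t)((modifier >> 23) & 0x7);
    return f;
}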
@@ -174,11 +193,14 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
    nv_fb->pSurface = nvKms->createSurface(nv_dev->pDevice, &params);
    if (nv_fb->pSurface == NULL) {
        NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Failed to create NvKmsKapiSurface");
        drm_framebuffer_cleanup(&nv_fb->base);
        return -EINVAL;
        goto fail;
    }

    return 0;

fail:
    drm_framebuffer_cleanup(&nv_fb->base);
    return -EINVAL;
}

struct drm_framebuffer *nv_drm_internal_framebuffer_create(

@@ -218,7 +240,7 @@ struct drm_framebuffer *nv_drm_internal_framebuffer_create(
        if (nv_dev->modifiers[i] == DRM_FORMAT_MOD_INVALID) {
            NV_DRM_DEV_DEBUG_DRIVER(
                nv_dev,
                "Invalid format modifier for framebuffer object: 0x%016llx",
                "Invalid format modifier for framebuffer object: 0x%016" NvU64_fmtx,
                modifier);
            return ERR_PTR(-EINVAL);
        }

kernel-open/nvidia-drm/nvidia-drm-fence.c (new file, 1842 lines)
[File diff suppressed because it is too large]
@@ -35,11 +35,27 @@ struct drm_device;
int nv_drm_fence_supported_ioctl(struct drm_device *dev,
                                 void *data, struct drm_file *filep);

int nv_drm_fence_context_create_ioctl(struct drm_device *dev,
                                      void *data, struct drm_file *filep);
int nv_drm_prime_fence_context_create_ioctl(struct drm_device *dev,
                                            void *data, struct drm_file *filep);

int nv_drm_gem_fence_attach_ioctl(struct drm_device *dev,
                                  void *data, struct drm_file *filep);
int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
                                        void *data, struct drm_file *filep);

int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev,
                                          void *data,
                                          struct drm_file *filep);

int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
                                      void *data,
                                      struct drm_file *filep);

int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
                                    void *data,
                                    struct drm_file *filep);

int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
                                      void *data,
                                      struct drm_file *filep);

#endif /* NV_DRM_FENCE_AVAILABLE */

@@ -71,12 +71,42 @@ static int __nv_drm_gem_dma_buf_create_mmap_offset(
static int __nv_drm_gem_dma_buf_mmap(struct nv_drm_gem_object *nv_gem,
                                     struct vm_area_struct *vma)
{
#if defined(NV_LINUX)
    struct dma_buf_attachment *attach = nv_gem->base.import_attach;
    struct dma_buf *dma_buf = attach->dmabuf;
#endif
    struct file *old_file;
    int ret;

    /* check if buffer supports mmap */
#if defined(NV_BSD)
    /*
     * Most of the FreeBSD DRM code refers to struct file*, which is actually
     * a struct linux_file*. The dmabuf code in FreeBSD is not actually plumbed
     * through the same linuxkpi bits it seems (probably so it can be used
     * elsewhere), so dma_buf->file really is a native FreeBSD struct file...
     */
    if (!nv_gem->base.filp->f_op->mmap)
        return -EINVAL;

    /* readjust the vma */
    get_file(nv_gem->base.filp);
    old_file = vma->vm_file;
    vma->vm_file = nv_gem->base.filp;
    vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);

    ret = nv_gem->base.filp->f_op->mmap(nv_gem->base.filp, vma);

    if (ret) {
        /* restore old parameters on failure */
        vma->vm_file = old_file;
        vma->vm_pgoff += drm_vma_node_start(&nv_gem->base.vma_node);
        fput(nv_gem->base.filp);
    } else {
        if (old_file)
            fput(old_file);
    }
#else
    if (!dma_buf->file->f_op->mmap)
        return -EINVAL;

@@ -84,18 +114,20 @@ static int __nv_drm_gem_dma_buf_mmap(struct nv_drm_gem_object *nv_gem,
    get_file(dma_buf->file);
    old_file = vma->vm_file;
    vma->vm_file = dma_buf->file;
    vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);;
    vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);

    ret = dma_buf->file->f_op->mmap(dma_buf->file, vma);

    if (ret) {
        /* restore old parameters on failure */
        vma->vm_file = old_file;
        vma->vm_pgoff += drm_vma_node_start(&nv_gem->base.vma_node);
        fput(dma_buf->file);
    } else {
        if (old_file)
            fput(old_file);
    }
#endif

    return ret;
}

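Editor's sketch (not part of the diff): the file-swap above redirects a GEM mmap on an imported dma-buf to the exporter's mmap hook. From userspace, the same buffer can also be mapped directly through its dma-buf fd, which lands in the very dma_buf->file->f_op->mmap callback delegated to here (assumes a valid dmabuf_fd and its size; CPU access should in general be bracketed with DMA_BUF_IOCTL_SYNC):

#include <stddef.h>
#include <sys/mman.h>

static void *map_dmabuf(int dmabuf_fd, size_t size)
{
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   dmabuf_fd, 0);
    return (p == MAP_FAILED) ? NULL : p;
}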
@@ -37,6 +37,9 @@
#endif

#include <linux/io.h>
#if defined(NV_BSD)
#include <vm/vm_pageout.h>
#endif

#include "nv-mm.h"

@@ -93,7 +96,17 @@ static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault(
    if (nv_nvkms_memory->pages_count == 0) {
        pfn = (unsigned long)(uintptr_t)nv_nvkms_memory->pPhysicalAddress;
        pfn >>= PAGE_SHIFT;
#if defined(NV_LINUX)
        /*
         * FreeBSD doesn't set pgoff. We instead have pfn be the base physical
         * address, and we will calculate the index pidx from the virtual address.
         *
         * This only works because linux_cdev_pager_populate passes the pidx as
         * vmf->virtual_address. Then we turn the virtual address
         * into a physical page number.
         */
        pfn += page_offset;
#endif
    } else {
        BUG_ON(page_offset >= nv_nvkms_memory->pages_count);
        pfn = page_to_pfn(nv_nvkms_memory->pages[page_offset]);

@@ -131,11 +144,11 @@ static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
    const struct nv_drm_gem_object *nv_gem_src);

static int __nv_drm_gem_nvkms_map(
    struct nv_drm_device *nv_dev,
    struct NvKmsKapiMemory *pMemory,
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory,
    uint64_t size)
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory)
{
    struct nv_drm_device *nv_dev = nv_nvkms_memory->base.nv_dev;
    struct NvKmsKapiMemory *pMemory = nv_nvkms_memory->base.pMemory;

    if (!nv_dev->hasVideoMemory) {
        return 0;
    }
@@ -153,7 +166,7 @@ static int __nv_drm_gem_nvkms_map(

    nv_nvkms_memory->pWriteCombinedIORemapAddress = ioremap_wc(
        (uintptr_t)nv_nvkms_memory->pPhysicalAddress,
        size);
        nv_nvkms_memory->base.base.size);

    if (!nv_nvkms_memory->pWriteCombinedIORemapAddress) {
        NV_DRM_DEV_LOG_INFO(

@@ -167,6 +180,22 @@ static int __nv_drm_gem_nvkms_map(
    return 0;
}

static void *__nv_drm_gem_nvkms_prime_vmap(
    struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
        to_nv_nvkms_memory(nv_gem);

    if (!nv_nvkms_memory->physically_mapped) {
        int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
        if (ret) {
            return ERR_PTR(ret);
        }
    }

    return nv_nvkms_memory->pWriteCombinedIORemapAddress;
}

static int __nv_drm_gem_map_nvkms_memory_offset(
    struct nv_drm_device *nv_dev,
    struct nv_drm_gem_object *nv_gem,

@@ -176,10 +205,7 @@ static int __nv_drm_gem_map_nvkms_memory_offset(
        to_nv_nvkms_memory(nv_gem);

    if (!nv_nvkms_memory->physically_mapped) {
        int ret = __nv_drm_gem_nvkms_map(nv_dev,
                                         nv_nvkms_memory->base.pMemory,
                                         nv_nvkms_memory,
                                         nv_nvkms_memory->base.base.size);
        int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
        if (ret) {
            return ret;
        }

@@ -201,7 +227,7 @@ static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
            nv_dev,
            "Cannot create sg_table for NvKmsKapiMemory 0x%p",
            nv_gem->pMemory);
        return NULL;
        return ERR_PTR(-ENOMEM);
    }

    sg_table = nv_drm_prime_pages_to_sg(nv_dev->dev,

@@ -214,6 +240,7 @@ static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops = {
    .free = __nv_drm_gem_nvkms_memory_free,
    .prime_dup = __nv_drm_gem_nvkms_prime_dup,
    .prime_vmap = __nv_drm_gem_nvkms_prime_vmap,
    .mmap = __nv_drm_gem_nvkms_mmap,
    .handle_vma_fault = __nv_drm_gem_nvkms_handle_vma_fault,
    .create_mmap_offset = __nv_drm_gem_map_nvkms_memory_offset,

@@ -229,6 +256,15 @@ static int __nv_drm_nvkms_gem_obj_init(
    NvU64 *pages = NULL;
    NvU32 numPages = 0;

    if ((size % PAGE_SIZE) != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "NvKmsKapiMemory 0x%p size should be a multiple of page size to "
            "create a gem object",
            pMemory);
        return -EINVAL;
    }

    nv_nvkms_memory->pPhysicalAddress = NULL;
    nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL;
    nv_nvkms_memory->physically_mapped = false;

@@ -300,7 +336,7 @@ int nv_drm_dumb_create(
        ret = -ENOMEM;
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to allocate NvKmsKapiMemory for dumb object of size %llu",
            "Failed to allocate NvKmsKapiMemory for dumb object of size %" NvU64_fmtu,
            args->size);
        goto nvkms_alloc_memory_failed;
    }

@@ -314,7 +350,7 @@ int nv_drm_dumb_create(
     * to use dumb buffers for software rendering, so they're not much use
     * without a CPU mapping.
     */
    ret = __nv_drm_gem_nvkms_map(nv_dev, pMemory, nv_nvkms_memory, args->size);
    ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
    if (ret) {
        nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
        goto fail;

@@ -344,7 +380,7 @@ int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev,
    int ret;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        ret = -EINVAL;
        ret = -EOPNOTSUPP;
        goto failed;
    }

@@ -394,7 +430,7 @@ int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
    int ret = 0;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        ret = -EINVAL;
        ret = -EOPNOTSUPP;
        goto done;
    }

@@ -447,11 +483,11 @@ int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
    int ret = 0;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        ret = -EINVAL;
        ret = -EOPNOTSUPP;
        goto failed;
    }

    if (p->__pad != 0) {
    if ((p->__pad0 != 0) || (p->__pad1 != 0)) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(nv_dev, "non-zero value in padding field");
        goto failed;

@@ -583,11 +619,13 @@ int nv_drm_dumb_map_offset(struct drm_file *file,
    return ret;
}

#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
int nv_drm_dumb_destroy(struct drm_file *file,
                        struct drm_device *dev,
                        uint32_t handle)
{
    return drm_gem_handle_delete(file, handle);
}
#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */

#endif

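Editor's note: NvU64_fmtu/NvU64_fmtx in the hunks above play the same role as the standard PRIu64/PRIx64 macros: NvU64 is 'unsigned long long' on some platforms and 'unsigned long' on others, so the conversion specifier has to be spliced in per-platform rather than hard-coded as "%llu". The standard C equivalent, for comparison:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t size = 4096;

    /* adjacent string literals concatenate, exactly like "%" NvU64_fmtu above */
    printf("dumb object of size %" PRIu64 "\n", size);
    printf("address 0x%" PRIx64 "\n", (uint64_t)0xdeadbeefULL);
    return 0;
}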
@@ -97,9 +97,11 @@ int nv_drm_dumb_map_offset(struct drm_file *file,
                           struct drm_device *dev, uint32_t handle,
                           uint64_t *offset);

#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
int nv_drm_dumb_destroy(struct drm_file *file,
                        struct drm_device *dev,
                        uint32_t handle);
#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */

struct drm_gem_object *nv_drm_gem_nvkms_prime_import(
    struct drm_device *dev,

@@ -36,6 +36,10 @@
#include "linux/mm.h"
#include "nv-mm.h"

#if defined(NV_BSD)
#include <vm/vm_pageout.h>
#endif

static inline
void __nv_drm_gem_user_memory_free(struct nv_drm_gem_object *nv_gem)
{

@@ -92,9 +96,9 @@ static int __nv_drm_gem_user_memory_mmap(struct nv_drm_gem_object *nv_gem,
        return -EINVAL;
    }

    vma->vm_flags &= ~VM_PFNMAP;
    vma->vm_flags &= ~VM_IO;
    vma->vm_flags |= VM_MIXEDMAP;
    nv_vm_flags_clear(vma, VM_PFNMAP);
    nv_vm_flags_clear(vma, VM_IO);
    nv_vm_flags_set(vma, VM_MIXEDMAP);

    return 0;
}
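Editor's note: nv_vm_flags_set()/nv_vm_flags_clear() used above are compat wrappers. Since Linux 6.3, vm_area_struct's vm_flags must be modified through vm_flags_set()/vm_flags_clear() rather than direct bit-twiddling. A sketch of their likely shape; the conftest macro name here is an assumption, and the real wrappers live in nv-mm.h:

#if defined(NV_VM_FLAGS_SET_PRESENT)   /* hypothetical conftest define */
static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
{
    vm_flags_set(vma, flags);          /* kernel >= 6.3 accessor */
}
static inline void nv_vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
{
    vm_flags_clear(vma, flags);
}
#else
static inline void nv_vm_flags_set(struct vm_area_struct *vma, unsigned long flags)
{
    vma->vm_flags |= flags;            /* older kernels: plain bit ops */
}
static inline void nv_vm_flags_clear(struct vm_area_struct *vma, unsigned long flags)
{
    vma->vm_flags &= ~flags;
}
#endif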
@@ -113,6 +117,10 @@ static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
    page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);

    BUG_ON(page_offset >= nv_user_memory->pages_count);

#if !defined(NV_LINUX)
    ret = vmf_insert_pfn(vma, address, page_to_pfn(nv_user_memory->pages[page_offset]));
#else /* !defined(NV_LINUX) */
    ret = vm_insert_page(vma, address, nv_user_memory->pages[page_offset]);
    switch (ret) {
        case 0:

@@ -131,6 +139,7 @@ static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
            ret = VM_FAULT_SIGBUS;
            break;
    }
#endif /* !defined(NV_LINUX) */

    return ret;
}

@@ -170,7 +179,7 @@ int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
    if ((params->size % PAGE_SIZE) != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Userspace memory 0x%llx size should be a multiple of page "
            "Userspace memory 0x%" NvU64_fmtx " size should be a multiple of page "
            "size to create a gem object",
            params->address);
        return -EINVAL;

@@ -183,7 +192,7 @@ int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
    if (ret != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lock user pages for address 0x%llx: %d",
            "Failed to lock user pages for address 0x%" NvU64_fmtx ": %d",
            params->address, ret);
        return ret;
    }

@@ -26,7 +26,7 @@

#include "nvidia-drm-priv.h"
#include "nvidia-drm-ioctl.h"
#include "nvidia-drm-prime-fence.h"
#include "nvidia-drm-fence.h"
#include "nvidia-drm-gem.h"
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvidia-drm-gem-user-memory.h"

@@ -81,10 +81,13 @@ typedef struct dma_buf_map nv_sysio_map_t;
static int nv_drm_gem_vmap(struct drm_gem_object *gem,
                           nv_sysio_map_t *map)
{
    map->vaddr = nv_drm_gem_prime_vmap(gem);
    if (map->vaddr == NULL) {
    void *vaddr = nv_drm_gem_prime_vmap(gem);
    if (vaddr == NULL) {
        return -ENOMEM;
    } else if (IS_ERR(vaddr)) {
        return PTR_ERR(vaddr);
    }
    map->vaddr = vaddr;
    map->is_iomem = true;
    return 0;
}

@@ -132,13 +135,8 @@ void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,

    /* Initialize the gem object */

#if defined(NV_DRM_FENCE_AVAILABLE)
#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
    nv_dma_resv_init(&nv_gem->resv);

#if defined(NV_DRM_GEM_OBJECT_HAS_RESV)
    nv_gem->base.resv = &nv_gem->resv;
#endif

#endif

#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT)

@@ -212,8 +210,7 @@ void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address)
nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj)
{
    struct nv_drm_gem_object *nv_gem = to_nv_gem_object(obj);

    return &nv_gem->resv;
    return nv_drm_gem_res_obj(nv_gem);
}
#endif

@@ -299,7 +296,7 @@ int nv_drm_mmap(struct file *file, struct vm_area_struct *vma)
            ret = -EINVAL;
            goto done;
        }
        vma->vm_flags &= ~VM_MAYWRITE;
        nv_vm_flags_clear(vma, VM_MAYWRITE);
    }
#endif

@@ -322,7 +319,7 @@ int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
    struct nv_drm_gem_object *nv_gem = NULL;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return -EINVAL;
        return -EOPNOTSUPP;
    }

    nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(dev, filep, p->handle);

@@ -45,6 +45,8 @@
#include "nvidia-dma-resv-helper.h"
#endif

#include "linux/dma-buf.h"

struct nv_drm_gem_object;

struct nv_drm_gem_object_funcs {

@@ -71,7 +73,7 @@ struct nv_drm_gem_object {

    struct NvKmsKapiMemory *pMemory;

#if defined(NV_DRM_FENCE_AVAILABLE)
#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
    nv_dma_resv_t resv;
#endif
};

@@ -93,6 +95,16 @@ static inline struct nv_drm_gem_object *to_nv_gem_object(
 * 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15).
 */

static inline void
nv_drm_gem_object_reference(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
    drm_gem_object_get(&nv_gem->base);
#else
    drm_gem_object_reference(&nv_gem->base);
#endif
}

static inline void
nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem)
{

@@ -177,6 +189,17 @@ static inline int nv_drm_gem_handle_create(struct drm_file *filp,
    return drm_gem_handle_create(filp, &nv_gem->base, handle);
}

#if defined(NV_DRM_FENCE_AVAILABLE)
static inline nv_dma_resv_t *nv_drm_gem_res_obj(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_HAS_RESV)
    return nv_gem->base.resv;
#else
    return nv_gem->base.dma_buf ? nv_gem->base.dma_buf->resv : &nv_gem->resv;
#endif
}
#endif

void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
                            struct nv_drm_gem_object *nv_gem,
                            const struct nv_drm_gem_object_funcs * const ops,

@@ -28,6 +28,8 @@
 */

#include "nvidia-drm-helper.h"
#include "nvidia-drm-priv.h"
#include "nvidia-drm-crtc.h"

#include "nvmisc.h"

@@ -148,6 +150,18 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
        goto free;
    }

#if defined(NV_DRM_ROTATION_AVAILABLE)
    nv_drm_for_each_plane(plane, dev) {
        plane_state = drm_atomic_get_plane_state(state, plane);
        if (IS_ERR(plane_state)) {
            ret = PTR_ERR(plane_state);
            goto free;
        }

        plane_state->rotation = DRM_MODE_ROTATE_0;
    }
#endif

    nv_drm_for_each_connector_in_state(state, conn, conn_state, i) {
        ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
        if (ret < 0)

@@ -35,6 +35,35 @@
#include <drm/drm_drv.h>
#endif

#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) || defined(NV_DRM_ROTATION_AVAILABLE)
/* For DRM_ROTATE_* , DRM_REFLECT_* */
#include <drm/drm_blend.h>
#endif

#if defined(NV_DRM_ROTATION_AVAILABLE)
/* For DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* */
#include <uapi/drm/drm_mode.h>
#endif

#if defined(NV_DRM_ROTATION_AVAILABLE)
/*
 * 19-05-2017 c2c446ad29437bb92b157423c632286608ebd3ec has added
 * DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* to UAPI and removed
 * DRM_ROTATE_* and DRM_REFLECT_*
 */
#if !defined(DRM_MODE_ROTATE_0)
#define DRM_MODE_ROTATE_0 DRM_ROTATE_0
#define DRM_MODE_ROTATE_90 DRM_ROTATE_90
#define DRM_MODE_ROTATE_180 DRM_ROTATE_180
#define DRM_MODE_ROTATE_270 DRM_ROTATE_270
#define DRM_MODE_REFLECT_X DRM_REFLECT_X
#define DRM_MODE_REFLECT_Y DRM_REFLECT_Y
#define DRM_MODE_ROTATE_MASK DRM_ROTATE_MASK
#define DRM_MODE_REFLECT_MASK DRM_REFLECT_MASK
#endif

#endif //NV_DRM_ROTATION_AVAILABLE

/*
 * drm_dev_put() is added by commit 9a96f55034e41b4e002b767e9218d55f03bdff7d
 * (2017-09-26) and drm_dev_unref() is removed by

@@ -277,11 +306,63 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
        for_each_plane_in_state(__state, plane, plane_state, __i)
#endif

static inline struct drm_crtc *nv_drm_crtc_find(struct drm_device *dev,
                                                uint32_t id)
/*
 * for_each_new_plane_in_state() was added by kernel commit
 * 581e49fe6b411f407102a7f2377648849e0fa37f which was Signed-off-by:
 *     Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
 *     Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * This commit also added the old_state and new_state pointers to
 * __drm_planes_state. Because of this, the best that can be done on kernel
 * versions without this macro is for_each_plane_in_state.
 */

/**
 * nv_drm_for_each_new_plane_in_state - iterate over all planes in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @plane: &struct drm_plane iteration cursor
 * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all planes in an atomic update, tracking only the new
 * state. This is useful in enable functions, where we need the new state the
 * hardware should be in when the atomic commit operation has completed.
 */
#if !defined(for_each_new_plane_in_state)
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
    nv_drm_for_each_plane_in_state(__state, plane, new_plane_state, __i)
#else
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
    for_each_new_plane_in_state(__state, plane, new_plane_state, __i)
#endif

static inline struct drm_connector *
nv_drm_connector_lookup(struct drm_device *dev, struct drm_file *filep,
                        uint32_t id)
{
#if !defined(NV_DRM_CONNECTOR_LOOKUP_PRESENT)
    return drm_connector_find(dev, id);
#elif defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
    return drm_connector_lookup(dev, filep, id);
#else
    return drm_connector_lookup(dev, id);
#endif
}

static inline void nv_drm_connector_put(struct drm_connector *connector)
{
#if defined(NV_DRM_CONNECTOR_PUT_PRESENT)
    drm_connector_put(connector);
#elif defined(NV_DRM_CONNECTOR_LOOKUP_PRESENT)
    drm_connector_unreference(connector);
#endif
}

static inline struct drm_crtc *
nv_drm_crtc_find(struct drm_device *dev, struct drm_file *filep, uint32_t id)
{
#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
    return drm_crtc_find(dev, NULL /* file_priv */, id);
    return drm_crtc_find(dev, filep, id);
#else
    return drm_crtc_find(dev, id);
#endif

@@ -297,6 +378,30 @@ static inline struct drm_encoder *nv_drm_encoder_find(struct drm_device *dev,
#endif
}

#if defined(NV_DRM_DRM_AUTH_H_PRESENT)
|
||||
#include <drm/drm_auth.h>
|
||||
#endif
|
||||
#if defined(NV_DRM_DRM_FILE_H_PRESENT)
|
||||
#include <drm/drm_file.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
* drm_file_get_master() added by commit 56f0729a510f ("drm: protect drm_master
|
||||
* pointers in drm_lease.c") in v5.15 (2021-07-20)
|
||||
*/
|
||||
static inline struct drm_master *nv_drm_file_get_master(struct drm_file *filep)
|
||||
{
|
||||
#if defined(NV_DRM_FILE_GET_MASTER_PRESENT)
|
||||
return drm_file_get_master(filep);
|
||||
#else
|
||||
if (filep->master) {
|
||||
return drm_master_get(filep->master);
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
}
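
[Editor's note: as with the other lookup helpers, callers are expected to balance the reference. A hypothetical sketch of the pattern; drm_master_put() is the standard DRM release call, and the helper itself is illustrative:]

static bool example_file_is_master(struct drm_file *filep)
{
    struct drm_master *master = nv_drm_file_get_master(filep);
    bool is_master = (master != NULL);

    if (master != NULL) {
        drm_master_put(&master); /* drop the reference taken above */
    }
    return is_master;
}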

/*
 * drm_connector_for_each_possible_encoder() is added by commit
 * 83aefbb887b59df0b3520965c3701e01deacfc52 which was Signed-off-by:
@@ -507,6 +612,19 @@ static inline int nv_drm_format_num_planes(uint32_t format)

#endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */

/*
 * DRM_UNLOCKED was removed with linux-next commit 2798ffcc1d6a ("drm: Remove
 * locking for legacy ioctls and DRM_UNLOCKED"), but it was previously made
 * implicit for all non-legacy DRM driver IOCTLs since Linux v4.10 commit
 * fa5386459f06 "drm: Used DRM_LEGACY for all legacy functions" (Linux v4.4
 * commit ea487835e887 "drm: Enforce unlocked ioctl operation for kms driver
 * ioctls" previously did it only for drivers that set the DRM_MODESET flag), so
 * it was effectively a no-op anyway.
 */
#if !defined(NV_DRM_UNLOCKED_IOCTL_FLAG_PRESENT)
#define DRM_UNLOCKED 0
#endif
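
[Editor's note: defining DRM_UNLOCKED to 0 lets existing ioctl tables keep OR-ing the flag in unconditionally on kernels where it no longer exists. A hypothetical table entry showing the pattern; the handler name is illustrative:]

/* Illustrative only: a driver ioctl table entry OR-ing the flag in. */
static int example_gem_map_offset_ioctl(struct drm_device *dev,
                                        void *data, struct drm_file *filep);

static const struct drm_ioctl_desc example_ioctls[] = {
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_MAP_OFFSET, example_gem_map_offset_ioctl,
                      DRM_RENDER_ALLOW | DRM_UNLOCKED), /* | 0 on new kernels */
};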

/*
 * drm_vma_offset_exact_lookup_locked() were added
 * by kernel commit 2225cfe46bcc which was Signed-off-by:

@@ -34,8 +34,8 @@
#define DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY 0x02
#define DRM_NVIDIA_GET_DEV_INFO 0x03
#define DRM_NVIDIA_FENCE_SUPPORTED 0x04
#define DRM_NVIDIA_FENCE_CONTEXT_CREATE 0x05
#define DRM_NVIDIA_GEM_FENCE_ATTACH 0x06
#define DRM_NVIDIA_PRIME_FENCE_CONTEXT_CREATE 0x05
#define DRM_NVIDIA_GEM_PRIME_FENCE_ATTACH 0x06
#define DRM_NVIDIA_GET_CLIENT_CAPABILITY 0x08
#define DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY 0x09
#define DRM_NVIDIA_GEM_MAP_OFFSET 0x0a
@@ -43,6 +43,15 @@
#define DRM_NVIDIA_GET_CRTC_CRC32_V2 0x0c
#define DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY 0x0d
#define DRM_NVIDIA_GEM_IDENTIFY_OBJECT 0x0e
#define DRM_NVIDIA_DMABUF_SUPPORTED 0x0f
#define DRM_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID 0x10
#define DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID 0x11
#define DRM_NVIDIA_GRANT_PERMISSIONS 0x12
#define DRM_NVIDIA_REVOKE_PERMISSIONS 0x13
#define DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE 0x14
#define DRM_NVIDIA_SEMSURF_FENCE_CREATE 0x15
#define DRM_NVIDIA_SEMSURF_FENCE_WAIT 0x16
#define DRM_NVIDIA_SEMSURF_FENCE_ATTACH 0x17

#define DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY), \
@@ -62,53 +71,92 @@
 *
 * 'warning: suggest parentheses around arithmetic in operand of |'
 */
#if defined(NV_LINUX)
#if defined(NV_LINUX) || defined(NV_BSD)
#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED \
    DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_SUPPORTED)
#define DRM_IOCTL_NVIDIA_DMABUF_SUPPORTED \
    DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_DMABUF_SUPPORTED)
#else
#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED 0
#define DRM_IOCTL_NVIDIA_DMABUF_SUPPORTED 0
#endif

#define DRM_IOCTL_NVIDIA_FENCE_CONTEXT_CREATE \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_CONTEXT_CREATE), \
             struct drm_nvidia_fence_context_create_params)
#define DRM_IOCTL_NVIDIA_PRIME_FENCE_CONTEXT_CREATE \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_PRIME_FENCE_CONTEXT_CREATE),\
             struct drm_nvidia_prime_fence_context_create_params)

#define DRM_IOCTL_NVIDIA_GEM_FENCE_ATTACH \
    DRM_IOW((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_FENCE_ATTACH), \
            struct drm_nvidia_gem_fence_attach_params)
#define DRM_IOCTL_NVIDIA_GEM_PRIME_FENCE_ATTACH \
    DRM_IOW((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_PRIME_FENCE_ATTACH), \
            struct drm_nvidia_gem_prime_fence_attach_params)

#define DRM_IOCTL_NVIDIA_GET_CLIENT_CAPABILITY \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CLIENT_CAPABILITY), \
             struct drm_nvidia_get_client_capability_params)

#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32 \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32), \
             struct drm_nvidia_get_crtc_crc32_params)

#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32_V2 \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32_V2), \
             struct drm_nvidia_get_crtc_crc32_v2_params)

#define DRM_IOCTL_NVIDIA_GEM_EXPORT_NVKMS_MEMORY \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY), \
             struct drm_nvidia_gem_export_nvkms_memory_params)

#define DRM_IOCTL_NVIDIA_GEM_MAP_OFFSET \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_MAP_OFFSET), \
             struct drm_nvidia_gem_map_offset_params)

#define DRM_IOCTL_NVIDIA_GEM_ALLOC_NVKMS_MEMORY \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY), \
             struct drm_nvidia_gem_alloc_nvkms_memory_params)

#define DRM_IOCTL_NVIDIA_GEM_EXPORT_DMABUF_MEMORY \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY), \
             struct drm_nvidia_gem_export_dmabuf_memory_params)

#define DRM_IOCTL_NVIDIA_GEM_IDENTIFY_OBJECT \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IDENTIFY_OBJECT), \
             struct drm_nvidia_gem_identify_object_params)

#define DRM_IOCTL_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID),\
             struct drm_nvidia_get_dpy_id_for_connector_id_params)

#define DRM_IOCTL_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID),\
             struct drm_nvidia_get_connector_id_for_dpy_id_params)

#define DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GRANT_PERMISSIONS), \
             struct drm_nvidia_grant_permissions_params)

#define DRM_IOCTL_NVIDIA_REVOKE_PERMISSIONS \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_REVOKE_PERMISSIONS), \
             struct drm_nvidia_revoke_permissions_params)

#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CTX_CREATE \
    DRM_IOWR((DRM_COMMAND_BASE + \
              DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE), \
             struct drm_nvidia_semsurf_fence_ctx_create_params)

#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CREATE \
    DRM_IOWR((DRM_COMMAND_BASE + \
              DRM_NVIDIA_SEMSURF_FENCE_CREATE), \
             struct drm_nvidia_semsurf_fence_create_params)

#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_WAIT \
    DRM_IOW((DRM_COMMAND_BASE + \
             DRM_NVIDIA_SEMSURF_FENCE_WAIT), \
            struct drm_nvidia_semsurf_fence_wait_params)

#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_ATTACH \
    DRM_IOW((DRM_COMMAND_BASE + \
             DRM_NVIDIA_SEMSURF_FENCE_ATTACH), \
            struct drm_nvidia_semsurf_fence_attach_params)

struct drm_nvidia_gem_import_nvkms_memory_params {
    uint64_t mem_size; /* IN */

@@ -130,13 +178,18 @@ struct drm_nvidia_get_dev_info_params {
    uint32_t gpu_id;        /* OUT */
    uint32_t primary_index; /* OUT; the "card%d" value */

    /* See DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these */
    uint32_t supports_alloc; /* OUT */
    /* The generic_page_kind, page_kind_generation, and sector_layout
     * fields are only valid if supports_alloc is true.
     * See DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these. */
    uint32_t generic_page_kind;    /* OUT */
    uint32_t page_kind_generation; /* OUT */
    uint32_t sector_layout;        /* OUT */
    uint32_t supports_sync_fd;     /* OUT */
    uint32_t supports_semsurf;     /* OUT */
};

struct drm_nvidia_fence_context_create_params {
struct drm_nvidia_prime_fence_context_create_params {
    uint32_t handle; /* OUT GEM handle to fence context */

    uint32_t index;  /* IN Index of semaphore to use for fencing */
@@ -151,10 +204,11 @@ struct drm_nvidia_fence_context_create_params {
    uint64_t event_nvkms_params_size; /* IN */
};

struct drm_nvidia_gem_fence_attach_params {
struct drm_nvidia_gem_prime_fence_attach_params {
    uint32_t handle;               /* IN GEM handle to attach fence to */
    uint32_t fence_context_handle; /* IN GEM handle of the fence context on
                                    * which the fence runs */
    uint32_t sem_thresh;           /* IN Semaphore value to reach before signal */
    uint32_t __pad;
};

struct drm_nvidia_get_client_capability_params {
@@ -166,6 +220,8 @@ struct drm_nvidia_get_client_capability_params {
struct drm_nvidia_crtc_crc32 {
    uint32_t value;    /* Read value, undefined if supported is false */
    uint8_t supported; /* Supported boolean, true if readable by hardware */
    uint8_t __pad0;
    uint16_t __pad1;
};

struct drm_nvidia_crtc_crc32_v2_out {
@@ -205,10 +261,11 @@ struct drm_nvidia_gem_alloc_nvkms_memory_params {
    uint32_t handle;      /* OUT */
    uint8_t block_linear; /* IN */
    uint8_t compressible; /* IN/OUT */
    uint16_t __pad;
    uint16_t __pad0;

    uint64_t memory_size; /* IN */
    uint32_t flags;       /* IN */
    uint32_t __pad1;
};

struct drm_nvidia_gem_export_dmabuf_memory_params {
@@ -232,4 +289,100 @@ struct drm_nvidia_gem_identify_object_params {
    drm_nvidia_gem_object_type object_type; /* OUT GEM object type */
};

struct drm_nvidia_get_dpy_id_for_connector_id_params {
    uint32_t connectorId; /* IN */
    uint32_t dpyId;       /* OUT */
};

struct drm_nvidia_get_connector_id_for_dpy_id_params {
    uint32_t dpyId;       /* IN */
    uint32_t connectorId; /* OUT */
};

enum drm_nvidia_permissions_type {
    NV_DRM_PERMISSIONS_TYPE_MODESET   = 2,
    NV_DRM_PERMISSIONS_TYPE_SUB_OWNER = 3
};

struct drm_nvidia_grant_permissions_params {
    int32_t fd;     /* IN */
    uint32_t dpyId; /* IN */
    uint32_t type;  /* IN */
};

struct drm_nvidia_revoke_permissions_params {
    uint32_t dpyId; /* IN */
    uint32_t type;  /* IN */
};

struct drm_nvidia_semsurf_fence_ctx_create_params {
    uint64_t index;             /* IN Index of the desired semaphore in the
                                 * fence context's semaphore surface */

    /* Params for importing userspace semaphore surface */
    uint64_t nvkms_params_ptr;  /* IN */
    uint64_t nvkms_params_size; /* IN */

    uint32_t handle;            /* OUT GEM handle to fence context */
    uint32_t __pad;
};

struct drm_nvidia_semsurf_fence_create_params {
    uint32_t fence_context_handle; /* IN GEM handle of the fence context on
                                    * which the fence runs */

    uint32_t timeout_value_ms;     /* IN Timeout value in ms for the fence,
                                    * after which the fence will be signaled
                                    * with its error status set to -ETIMEDOUT.
                                    * Default timeout value is 5000ms */

    uint64_t wait_value;           /* IN Semaphore value to reach before signal */

    int32_t fd;                    /* OUT sync FD object representing the
                                    * semaphore at the specified index reaching
                                    * a value >= wait_value */
    uint32_t __pad;
};

/*
 * Note there is no provision for timeouts in this ioctl. The kernel
 * documentation asserts timeouts should be handled by fence producers, and
 * that waiters should not second-guess their logic, as it is producers rather
 * than consumers that have better information when it comes to determining a
 * reasonable timeout for a given workload.
 */
struct drm_nvidia_semsurf_fence_wait_params {
    uint32_t fence_context_handle; /* IN GEM handle to fence context which will
                                    * be used to wait on the sync FD. Need not
                                    * be the fence context used to create the
                                    * sync FD. */

    int32_t fd;                    /* IN sync FD object to wait on */

    uint64_t pre_wait_value;       /* IN Wait for the semaphore represented by
                                    * fence_context to reach this value before
                                    * waiting for the sync file. */

    uint64_t post_wait_value;      /* IN Signal the semaphore represented by
                                    * fence_context to this value after waiting
                                    * for the sync file */
};

struct drm_nvidia_semsurf_fence_attach_params {
    uint32_t handle;               /* IN GEM handle of buffer */

    uint32_t fence_context_handle; /* IN GEM handle of fence context */

    uint32_t timeout_value_ms;     /* IN Timeout value in ms for the fence,
                                    * after which the fence will be signaled
                                    * with its error status set to -ETIMEDOUT.
                                    * Default timeout value is 5000ms */

    uint32_t shared;               /* IN If true, fence will reserve shared
                                    * access to the buffer, otherwise it will
                                    * reserve exclusive access */

    uint64_t wait_value;           /* IN Semaphore value to reach before signal */
};

#endif /* _UAPI_NVIDIA_DRM_IOCTL_H_ */
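
[Editor's note: for illustration, userspace drives these definitions through the standard DRM ioctl path. A minimal sketch, assuming an already-open DRM fd and this header included as nvidia-drm-ioctl.h; error handling trimmed:]

#include <stdint.h>
#include <sys/ioctl.h>

/* Map a DRM connector ID to the NVKMS display ID it corresponds to. */
static int example_get_dpy_id(int drm_fd, uint32_t connector_id,
                              uint32_t *dpy_id)
{
    struct drm_nvidia_get_dpy_id_for_connector_id_params p = {
        .connectorId = connector_id, /* IN */
    };

    if (ioctl(drm_fd, DRM_IOCTL_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID, &p) != 0) {
        return -1;
    }

    *dpy_id = p.dpyId; /* OUT */
    return 0;
}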

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -21,8 +21,6 @@
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>

#include "nvidia-drm-os-interface.h"
#include "nvidia-drm.h"
@@ -31,135 +29,18 @@

#if defined(NV_DRM_AVAILABLE)

#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif

#include <linux/vmalloc.h>

#include "nv-mm.h"

MODULE_PARM_DESC(
    modeset,
    "Enable atomic kernel modesetting (1 = enable, 0 = disable (default))");
bool nv_drm_modeset_module_param = false;
module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);

void *nv_drm_calloc(size_t nmemb, size_t size)
{
    size_t total_size = nmemb * size;
    //
    // Check for overflow.
    //
    if ((nmemb != 0) && ((total_size / nmemb) != size))
    {
        return NULL;
    }
    return kzalloc(nmemb * size, GFP_KERNEL);
}

void nv_drm_free(void *ptr)
{
    if (IS_ERR(ptr)) {
        return;
    }

    kfree(ptr);
}

char *nv_drm_asprintf(const char *fmt, ...)
{
    va_list ap;
    char *p;

    va_start(ap, fmt);
    p = kvasprintf(GFP_KERNEL, fmt, ap);
    va_end(ap);

    return p;
}

#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
#elif defined(NVCPU_FAMILY_ARM)
#if defined(NVCPU_ARM)
#define WRITE_COMBINE_FLUSH() { dsb(); outer_sync(); }
#elif defined(NVCPU_AARCH64)
#define WRITE_COMBINE_FLUSH() mb()
#endif
#elif defined(NVCPU_PPC64LE)
#define WRITE_COMBINE_FLUSH() asm volatile("sync":::"memory")
#if defined(NV_DRM_FBDEV_AVAILABLE)
MODULE_PARM_DESC(
    fbdev,
    "Create a framebuffer device (1 = enable, 0 = disable (default)) (EXPERIMENTAL)");
module_param_named(fbdev, nv_drm_fbdev_module_param, bool, 0400);
#endif

void nv_drm_write_combine_flush(void)
{
    WRITE_COMBINE_FLUSH();
}

int nv_drm_lock_user_pages(unsigned long address,
                           unsigned long pages_count, struct page ***pages)
{
    struct mm_struct *mm = current->mm;
    struct page **user_pages;
    int pages_pinned;

    user_pages = nv_drm_calloc(pages_count, sizeof(*user_pages));

    if (user_pages == NULL) {
        return -ENOMEM;
    }

    nv_mmap_read_lock(mm);

    pages_pinned = NV_PIN_USER_PAGES(address, pages_count, FOLL_WRITE,
                                     user_pages, NULL);
    nv_mmap_read_unlock(mm);

    if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
        goto failed;
    }

    *pages = user_pages;

    return 0;

failed:

    if (pages_pinned > 0) {
        int i;

        for (i = 0; i < pages_pinned; i++) {
            NV_UNPIN_USER_PAGE(user_pages[i]);
        }
    }

    nv_drm_free(user_pages);

    return (pages_pinned < 0) ? pages_pinned : -EINVAL;
}

void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages)
{
    unsigned long i;

    for (i = 0; i < pages_count; i++) {
        set_page_dirty_lock(pages[i]);
        NV_UNPIN_USER_PAGE(pages[i]);
    }

    nv_drm_free(pages);
}

void *nv_drm_vmap(struct page **pages, unsigned long pages_count)
{
    return vmap(pages, pages_count, VM_USERMAP, PAGE_KERNEL);
}

void nv_drm_vunmap(void *address)
{
    vunmap(address);
}

#endif /* NV_DRM_AVAILABLE */

/*************************************************************************

@@ -131,16 +131,19 @@ static int __nv_drm_put_back_post_fence_fd(
    const struct NvKmsKapiLayerReplyConfig *layer_reply_config)
{
    int fd = layer_reply_config->postSyncptFd;
    int ret = 0;

    if ((fd >= 0) && (plane_state->fd_user_ptr != NULL)) {
        if (put_user(fd, plane_state->fd_user_ptr)) {
            return -EFAULT;
        ret = copy_to_user(plane_state->fd_user_ptr, &fd, sizeof(fd));
        if (ret != 0) {
            return ret;
        }

        /*! set back to NULL and let set_property specify it again */
        plane_state->fd_user_ptr = NULL;
    }
    return 0;

    return ret;
}

static int __nv_drm_get_syncpt_data(
@@ -234,6 +237,14 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
    int i;
    int ret;

    /*
     * If sub-owner permission was granted to another NVKMS client, disallow
     * modesets through the DRM interface.
     */
    if (nv_dev->subOwnershipGranted) {
        return -EINVAL;
    }

    memset(requested_config, 0, sizeof(*requested_config));

    /* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */
@@ -271,9 +282,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,

            nv_new_crtc_state->nv_flip = NULL;
        }
#if defined(NV_DRM_CRTC_STATE_HAS_VRR_ENABLED)
        requested_config->headRequestedConfig[nv_crtc->head].modeSetConfig.vrrEnabled = new_crtc_state->vrr_enabled;
#endif
    }
}

@@ -289,7 +297,9 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
        requested_config,
        &reply_config,
        commit)) {
        return -EINVAL;
        if (commit || reply_config.flipResult != NV_KMS_FLIP_RESULT_IN_PROGRESS) {
            return -EINVAL;
        }
    }

    if (commit && nv_dev->supportsSyncpts) {
@@ -311,6 +321,24 @@ int nv_drm_atomic_check(struct drm_device *dev,
{
    int ret = 0;

#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
    struct drm_crtc *crtc;
    struct drm_crtc_state *crtc_state;
    int i;

    nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
        /*
         * If the color management changed on the crtc, we need to update the
         * crtc's planes' CSC matrices, so add the crtc's planes to the commit.
         */
        if (crtc_state->color_mgmt_changed) {
            if ((ret = drm_atomic_add_affected_planes(state, crtc)) != 0) {
                goto done;
            }
        }
    }
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */

    if ((ret = drm_atomic_helper_check(dev, state)) != 0) {
        goto done;
    }
@@ -385,42 +413,56 @@ int nv_drm_atomic_commit(struct drm_device *dev,
    struct nv_drm_device *nv_dev = to_nv_device(dev);

    /*
     * drm_mode_config_funcs::atomic_commit() mandates to return -EBUSY
     * for nonblocking commit if previous updates (commit tasks/flip event) are
     * pending. In case of blocking commits it mandates to wait for previous
     * updates to complete.
     * XXX: drm_mode_config_funcs::atomic_commit() mandates to return -EBUSY
     * for nonblocking commit if the commit would need to wait for previous
     * updates (commit tasks/flip event) to complete. In case of blocking
     * commits it mandates to wait for previous updates to complete. However,
     * the kernel DRM-KMS documentation does explicitly allow maintaining a
     * queue of outstanding commits.
     *
     * Our system already implements such a queue, but due to
     * bug 4054608, it is currently not used.
     */
    if (nonblock) {
        nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
            struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
    nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);

        /*
         * Here you aren't required to hold nv_drm_crtc::flip_list_lock
         * because:
         *
         * The core DRM driver acquires lock for all affected crtcs before
         * calling into ->commit() hook, therefore it is not possible for
         * other threads to call into ->commit() hook affecting same crtcs
         * and enqueue flip objects into flip_list -
         *
         * nv_drm_atomic_commit_internal()
         * |-> nv_drm_atomic_apply_modeset_config(commit=true)
         *     |-> nv_drm_crtc_enqueue_flip()
         *
         * Only possibility is list_empty check races with code path
         * dequeuing flip object -
         *
         * __nv_drm_handle_flip_event()
         * |-> nv_drm_crtc_dequeue_flip()
         *
         * But this race condition can't lead list_empty() to return
         * incorrect result. nv_drm_crtc_dequeue_flip() in the middle of
         * updating the list could not trick us into thinking the list is
         * empty when it isn't.
         */
        if (nonblock) {
            if (!list_empty(&nv_crtc->flip_list)) {
                return -EBUSY;
            }
        } else {
            if (wait_event_timeout(
                    nv_dev->flip_event_wq,
                    list_empty(&nv_crtc->flip_list),
                    3 * HZ /* 3 seconds */) == 0) {
                NV_DRM_DEV_LOG_ERR(
                    nv_dev,
                    "Flip event timeout on head %u", nv_crtc->head);
            }
        }
    }

@@ -464,6 +506,7 @@ int nv_drm_atomic_commit(struct drm_device *dev,

        goto done;
    }
    nv_dev->drmMasterChangedSinceLastAtomicCommit = NV_FALSE;

    nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);

kernel-open/nvidia-drm/nvidia-drm-os-interface.c (new file, 285 lines)
@@ -0,0 +1,285 @@
/*
 * Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/slab.h>

#include "nvidia-drm-os-interface.h"

#if defined(NV_DRM_AVAILABLE)

#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
#include <linux/file.h>
#include <linux/sync_file.h>
#endif

#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/device.h>

#include "nv-mm.h"

#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif

bool nv_drm_modeset_module_param = false;
bool nv_drm_fbdev_module_param = false;

void *nv_drm_calloc(size_t nmemb, size_t size)
{
    size_t total_size = nmemb * size;
    //
    // Check for overflow.
    //
    if ((nmemb != 0) && ((total_size / nmemb) != size))
    {
        return NULL;
    }
    return kzalloc(nmemb * size, GFP_KERNEL);
}
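
[Editor's note: the division check above catches multiplications that wrap. On a 64-bit kernel, nmemb = 1 << 61 and size = 16 multiply to 2^65, which wraps to 0; 0 / nmemb is 0, not 16, so NULL is returned instead of an undersized allocation. A standalone sketch of the same guard, for illustration only:]

/* Hypothetical demonstration of the overflow guard above. */
static bool example_mul_overflows(size_t nmemb, size_t size)
{
    size_t total = nmemb * size; /* e.g. ((size_t)1 << 61) * 16 wraps to 0 */
    return (nmemb != 0) && (total / nmemb != size);
}
/* example_mul_overflows((size_t)1 << 61, 16) returns true on 64-bit. */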

void nv_drm_free(void *ptr)
{
    if (IS_ERR(ptr)) {
        return;
    }

    kfree(ptr);
}

char *nv_drm_asprintf(const char *fmt, ...)
{
    va_list ap;
    char *p;

    va_start(ap, fmt);
    p = kvasprintf(GFP_KERNEL, fmt, ap);
    va_end(ap);

    return p;
}

#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
#elif defined(NVCPU_PPC64LE)
#define WRITE_COMBINE_FLUSH() asm volatile("sync":::"memory")
#else
#define WRITE_COMBINE_FLUSH() mb()
#endif

void nv_drm_write_combine_flush(void)
{
    WRITE_COMBINE_FLUSH();
}

int nv_drm_lock_user_pages(unsigned long address,
                           unsigned long pages_count, struct page ***pages)
{
    struct mm_struct *mm = current->mm;
    struct page **user_pages;
    int pages_pinned;

    user_pages = nv_drm_calloc(pages_count, sizeof(*user_pages));

    if (user_pages == NULL) {
        return -ENOMEM;
    }

    nv_mmap_read_lock(mm);

    pages_pinned = NV_PIN_USER_PAGES(address, pages_count, FOLL_WRITE,
                                     user_pages);
    nv_mmap_read_unlock(mm);

    if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
        goto failed;
    }

    *pages = user_pages;

    return 0;

failed:

    if (pages_pinned > 0) {
        int i;

        for (i = 0; i < pages_pinned; i++) {
            NV_UNPIN_USER_PAGE(user_pages[i]);
        }
    }

    nv_drm_free(user_pages);

    return (pages_pinned < 0) ? pages_pinned : -EINVAL;
}

void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages)
{
    unsigned long i;

    for (i = 0; i < pages_count; i++) {
        set_page_dirty_lock(pages[i]);
        NV_UNPIN_USER_PAGE(pages[i]);
    }

    nv_drm_free(pages);
}
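
[Editor's note: a hypothetical caller pairs the two helpers around whatever access it needs; on success exactly pages_count pages are pinned and must later be released. The wrapper function is illustrative:]

static int example_access_user_pages(unsigned long uaddr,
                                     unsigned long npages)
{
    struct page **pages;
    int ret = nv_drm_lock_user_pages(uaddr, npages, &pages);

    if (ret != 0) {
        return ret; /* nothing is left pinned on failure */
    }

    /* ... access the pinned pages, e.g. via nv_drm_vmap(pages, npages) ... */

    nv_drm_unlock_user_pages(npages, pages); /* marks dirty and unpins */
    return 0;
}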

/*
 * linuxkpi vmap doesn't use the flags argument as it
 * doesn't seem to be needed. Define VM_USERMAP to 0
 * to make errors go away.
 *
 * vmap: sys/compat/linuxkpi/common/src/linux_compat.c
 */
#if defined(NV_BSD)
#define VM_USERMAP 0
#endif

void *nv_drm_vmap(struct page **pages, unsigned long pages_count)
{
    return vmap(pages, pages_count, VM_USERMAP, PAGE_KERNEL);
}

void nv_drm_vunmap(void *address)
{
    vunmap(address);
}

bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name)
{
    worker->shutting_down = false;
    if (nv_kthread_q_init(&worker->q, name)) {
        return false;
    }

    spin_lock_init(&worker->lock);

    return true;
}

void nv_drm_workthread_shutdown(nv_drm_workthread *worker)
{
    unsigned long flags;

    spin_lock_irqsave(&worker->lock, flags);
    worker->shutting_down = true;
    spin_unlock_irqrestore(&worker->lock, flags);

    nv_kthread_q_stop(&worker->q);
}

void nv_drm_workthread_work_init(nv_drm_work *work,
                                 void (*callback)(void *),
                                 void *arg)
{
    nv_kthread_q_item_init(work, callback, arg);
}

int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work)
{
    unsigned long flags;
    int ret = 0;

    spin_lock_irqsave(&worker->lock, flags);
    if (!worker->shutting_down) {
        ret = nv_kthread_q_schedule_q_item(&worker->q, work);
    }
    spin_unlock_irqrestore(&worker->lock, flags);

    return ret;
}
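
[Editor's note: taken together, the primitives above form a small deferred-work facility; the shutting_down flag, checked under the lock, is what makes add_work safe to race against shutdown. A usage sketch under the same API; the callback and queue name are illustrative, and the return value of add_work comes straight from nv_kthread_q_schedule_q_item():]

static void example_work_cb(void *arg)
{
    /* Runs on the worker kthread. */
}

static int example_use_workthread(void)
{
    nv_drm_workthread worker;
    nv_drm_work work;

    if (!nv_drm_workthread_init(&worker, "nv_example_q")) {
        return -ENOMEM;
    }

    nv_drm_workthread_work_init(&work, example_work_cb, NULL);
    nv_drm_workthread_add_work(&worker, &work); /* refused after shutdown */

    nv_drm_workthread_shutdown(&worker); /* stops the queue */
    return 0;
}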

void nv_drm_timer_setup(nv_drm_timer *timer, void (*callback)(nv_drm_timer *nv_drm_timer))
{
    nv_timer_setup(timer, callback);
}

void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long timeout_native)
{
    mod_timer(&timer->kernel_timer, timeout_native);
}

unsigned long nv_drm_timer_now(void)
{
    return jiffies;
}

unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms)
{
    return jiffies + msecs_to_jiffies(relative_timeout_ms);
}

bool nv_drm_del_timer_sync(nv_drm_timer *timer)
{
    if (del_timer_sync(&timer->kernel_timer)) {
        return true;
    } else {
        return false;
    }
}
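
[Editor's note: a sketch of arming a one-shot timer with these wrappers. nv_drm_mod_timer() takes an absolute jiffies value, which is why nv_drm_timeout_from_ms() adds the delay to the current jiffies; the callback and helper names are illustrative:]

static void example_timer_cb(nv_drm_timer *timer)
{
    /* The timer fired. */
}

static void example_arm_timer(nv_drm_timer *timer)
{
    nv_drm_timer_setup(timer, example_timer_cb);
    /* Fire roughly 5000 ms from now. */
    nv_drm_mod_timer(timer, nv_drm_timeout_from_ms(5000));
}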

#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_create_sync_file(nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
    struct sync_file *sync;
    int fd = get_unused_fd_flags(O_CLOEXEC);

    if (fd < 0) {
        return fd;
    }

    /* sync_file_create() generates its own reference to the fence */
    sync = sync_file_create(fence);

    if (IS_ERR(sync)) {
        put_unused_fd(fd);
        return PTR_ERR(sync);
    }

    fd_install(fd, sync->file);

    return fd;
#else /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
    return -EINVAL;
#endif /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
}
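
[Editor's note: the fd reservation is deliberately split from fd_install(): once fd_install() runs, the fd owns the sync_file's file reference and the operation can no longer fail, so the error path only ever has to release the unused fd. A hypothetical caller returning the fd to userspace:]

/* Illustrative only: export a fence as a sync-file fd. */
static int example_export_fence(nv_dma_fence_t *fence, int32_t *out_fd)
{
    int fd = nv_drm_create_sync_file(fence);

    if (fd < 0) {
        return fd; /* -EINVAL if sync_file support is not compiled in */
    }

    *out_fd = fd;
    return 0;
}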

nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd)
{
#if defined(NV_SYNC_FILE_GET_FENCE_PRESENT)
    return sync_file_get_fence(fd);
#else /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
    return NULL;
#endif /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */

void nv_drm_yield(void)
{
    set_current_state(TASK_INTERRUPTIBLE);
    schedule_timeout(1);
}

#endif /* NV_DRM_AVAILABLE */

@@ -29,10 +29,53 @@

#if defined(NV_DRM_AVAILABLE)

#if defined(NV_DRM_FENCE_AVAILABLE)
#include "nvidia-dma-fence-helper.h"
#endif

#if defined(NV_LINUX) || defined(NV_BSD)
#include "nv-kthread-q.h"
#include "linux/spinlock.h"

typedef struct nv_drm_workthread {
    spinlock_t lock;
    struct nv_kthread_q q;
    bool shutting_down;
} nv_drm_workthread;

typedef nv_kthread_q_item_t nv_drm_work;

#else
#error "Need to define deferred work primitives for this OS"
#endif

#if defined(NV_LINUX) || defined(NV_BSD)
#include "nv-timer.h"

typedef struct nv_timer nv_drm_timer;

#else
#error "Need to define kernel timer callback primitives for this OS"
#endif

#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_GENERIC_AVAILABLE
#endif

#if defined(NV_DRM_FBDEV_TTM_SETUP_PRESENT) && defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_TTM_AVAILABLE
#endif

struct page;

/* Set to true when the atomic modeset feature is enabled. */
extern bool nv_drm_modeset_module_param;
#if defined(NV_DRM_FBDEV_AVAILABLE)
/* Set to true when the nvidia-drm driver should install a framebuffer device */
extern bool nv_drm_fbdev_module_param;
#endif

void *nv_drm_calloc(size_t nmemb, size_t size);

@@ -51,6 +94,37 @@ void *nv_drm_vmap(struct page **pages, unsigned long pages_count);

void nv_drm_vunmap(void *address);

#endif
bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name);

/* Can be called concurrently with nv_drm_workthread_add_work() */
void nv_drm_workthread_shutdown(nv_drm_workthread *worker);

void nv_drm_workthread_work_init(nv_drm_work *work,
                                 void (*callback)(void *),
                                 void *arg);

/* Can be called concurrently with nv_drm_workthread_shutdown() */
int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work);

void nv_drm_timer_setup(nv_drm_timer *timer,
                        void (*callback)(nv_drm_timer *nv_drm_timer));

void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long relative_timeout_ms);

bool nv_drm_del_timer_sync(nv_drm_timer *timer);

unsigned long nv_drm_timer_now(void);

unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms);

#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_create_sync_file(nv_dma_fence_t *fence);

nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd);
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */

void nv_drm_yield(void);

#endif /* defined(NV_DRM_AVAILABLE) */

#endif /* __NVIDIA_DRM_OS_INTERFACE_H__ */

@@ -1,527 +0,0 @@
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvidia-drm-conftest.h"

#if defined(NV_DRM_AVAILABLE)

#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif

#include "nvidia-drm-priv.h"
#include "nvidia-drm-ioctl.h"
#include "nvidia-drm-gem.h"
#include "nvidia-drm-prime-fence.h"
#include "nvidia-dma-resv-helper.h"

#if defined(NV_DRM_FENCE_AVAILABLE)

#include "nvidia-dma-fence-helper.h"

struct nv_drm_fence_context {
    struct nv_drm_device *nv_dev;

    uint32_t context;

    NvU64 fenceSemIndex; /* Index into semaphore surface */

    /* Mapped semaphore surface */
    struct NvKmsKapiMemory *pSemSurface;
    NvU32 *pLinearAddress;

    /* Protects nv_drm_fence_context::{pending, last_seqno} */
    spinlock_t lock;

    /*
     * Software signaling structures. __nv_drm_fence_context_new()
     * allocates the channel event and __nv_drm_fence_context_destroy() frees
     * it. There is no simultaneous read/write access to 'cb', therefore it
     * does not require spin-lock protection.
     */
    struct NvKmsKapiChannelEvent *cb;

    /* List of pending fences which are not yet signaled */
    struct list_head pending;

    unsigned last_seqno;
};

struct nv_drm_prime_fence {
    struct list_head list_entry;
    nv_dma_fence_t base;
    spinlock_t lock;
};

static inline
struct nv_drm_prime_fence *to_nv_drm_prime_fence(nv_dma_fence_t *fence)
{
    return container_of(fence, struct nv_drm_prime_fence, base);
}

static const char*
nv_drm_gem_prime_fence_op_get_driver_name(nv_dma_fence_t *fence)
{
    return "NVIDIA";
}

static const char*
nv_drm_gem_prime_fence_op_get_timeline_name(nv_dma_fence_t *fence)
{
    return "nvidia.prime";
}

static bool nv_drm_gem_prime_fence_op_enable_signaling(nv_dma_fence_t *fence)
{
    // DO NOTHING
    return true;
}

static void nv_drm_gem_prime_fence_op_release(nv_dma_fence_t *fence)
{
    struct nv_drm_prime_fence *nv_fence = to_nv_drm_prime_fence(fence);
    nv_drm_free(nv_fence);
}

static signed long
nv_drm_gem_prime_fence_op_wait(nv_dma_fence_t *fence,
                               bool intr, signed long timeout)
{
    /*
     * If the waiter requests to wait with no timeout, force a timeout to
     * ensure that it won't get stuck forever in the kernel if something were
     * to go wrong with signaling, such as a malicious userspace not releasing
     * the semaphore.
     *
     * 96 ms (roughly 6 frames @ 60 Hz) is arbitrarily chosen to be long enough
     * that it should never get hit during normal operation, but not so long
     * that the system becomes unresponsive.
     */
    return nv_dma_fence_default_wait(fence, intr,
                                     (timeout == MAX_SCHEDULE_TIMEOUT) ?
                                         msecs_to_jiffies(96) : timeout);
}
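
[Editor's note: for reference, the 96 ms figure lines up with the frame math in the comment: at 60 Hz each frame lasts 1000 / 60 ≈ 16.7 ms, so six frames take roughly 100 ms. Substituting msecs_to_jiffies(96) for MAX_SCHEDULE_TIMEOUT therefore caps an otherwise unbounded wait at just under six frames' worth of time.]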

static const nv_dma_fence_ops_t nv_drm_gem_prime_fence_ops = {
    .get_driver_name = nv_drm_gem_prime_fence_op_get_driver_name,
    .get_timeline_name = nv_drm_gem_prime_fence_op_get_timeline_name,
    .enable_signaling = nv_drm_gem_prime_fence_op_enable_signaling,
    .release = nv_drm_gem_prime_fence_op_release,
    .wait = nv_drm_gem_prime_fence_op_wait,
};

static inline void
__nv_drm_prime_fence_signal(struct nv_drm_prime_fence *nv_fence)
{
    list_del(&nv_fence->list_entry);
    nv_dma_fence_signal(&nv_fence->base);
    nv_dma_fence_put(&nv_fence->base);
}

static void nv_drm_gem_prime_force_fence_signal(
    struct nv_drm_fence_context *nv_fence_context)
{
    WARN_ON(!spin_is_locked(&nv_fence_context->lock));

    while (!list_empty(&nv_fence_context->pending)) {
        struct nv_drm_prime_fence *nv_fence = list_first_entry(
            &nv_fence_context->pending,
            typeof(*nv_fence),
            list_entry);

        __nv_drm_prime_fence_signal(nv_fence);
    }
}

static void nv_drm_gem_prime_fence_event
(
    void *dataPtr,
    NvU32 dataU32
)
{
    struct nv_drm_fence_context *nv_fence_context = dataPtr;

    spin_lock(&nv_fence_context->lock);

    while (!list_empty(&nv_fence_context->pending)) {
        struct nv_drm_prime_fence *nv_fence = list_first_entry(
            &nv_fence_context->pending,
            typeof(*nv_fence),
            list_entry);

        /* Index into surface with 16 byte stride */
        unsigned int seqno = *((nv_fence_context->pLinearAddress) +
                               (nv_fence_context->fenceSemIndex * 4));

        if (nv_fence->base.seqno > seqno) {
            /*
             * Fences in the list are placed in increasing order of sequence
             * number; break the loop once the first fence not ready to
             * signal is found.
             */
            break;
        }

        __nv_drm_prime_fence_signal(nv_fence);
    }

    spin_unlock(&nv_fence_context->lock);
}
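
[Editor's note: the pointer arithmetic in the semaphore read follows from the types: pLinearAddress is an NvU32 *, so each +1 advances 4 bytes, and a 16-byte slot stride corresponds to fenceSemIndex * 4 NvU32 elements, i.e. a byte offset of fenceSemIndex * 16. An equivalent byte-offset view, for illustration only:]

/* Equivalent byte-offset form of the read above (illustrative). */
unsigned int seqno =
    *(NvU32 *)((char *)nv_fence_context->pLinearAddress +
               nv_fence_context->fenceSemIndex * 16);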

static inline struct nv_drm_fence_context *__nv_drm_fence_context_new(
    struct nv_drm_device *nv_dev,
    struct drm_nvidia_fence_context_create_params *p)
{
    struct nv_drm_fence_context *nv_fence_context;
    struct NvKmsKapiMemory *pSemSurface;
    NvU32 *pLinearAddress;

    /* Allocate backup nvkms resources */

    pSemSurface = nvKms->importMemory(nv_dev->pDevice,
                                      p->size,
                                      p->import_mem_nvkms_params_ptr,
                                      p->import_mem_nvkms_params_size);
    if (!pSemSurface) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to import fence semaphore surface");

        goto failed;
    }

    if (!nvKms->mapMemory(nv_dev->pDevice,
                          pSemSurface,
                          NVKMS_KAPI_MAPPING_TYPE_KERNEL,
                          (void **) &pLinearAddress)) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to map fence semaphore surface");

        goto failed_to_map_memory;
    }

    /*
     * Allocate a fence context object, initialize it and allocate a channel
     * event for it.
     */

    if ((nv_fence_context = nv_drm_calloc(
                1,
                sizeof(*nv_fence_context))) == NULL) {
        goto failed_alloc_fence_context;
    }

    /*
     * nv_dma_fence_context_alloc() cannot fail, so we do not need
     * to check a return value.
     */

    *nv_fence_context = (struct nv_drm_fence_context) {
        .nv_dev = nv_dev,
        .context = nv_dma_fence_context_alloc(1),
        .pSemSurface = pSemSurface,
        .pLinearAddress = pLinearAddress,
        .fenceSemIndex = p->index,
    };

    INIT_LIST_HEAD(&nv_fence_context->pending);

    spin_lock_init(&nv_fence_context->lock);

    /*
     * Except for 'cb', the fence context should be completely initialized
     * before channel event allocation because the fence context may start
     * receiving events immediately after allocation.
     *
     * There is no simultaneous read/write access to 'cb', therefore it does
     * not require spin-lock protection.
     */
    nv_fence_context->cb =
        nvKms->allocateChannelEvent(nv_dev->pDevice,
                                    nv_drm_gem_prime_fence_event,
                                    nv_fence_context,
                                    p->event_nvkms_params_ptr,
                                    p->event_nvkms_params_size);
    if (!nv_fence_context->cb) {
        NV_DRM_DEV_LOG_ERR(nv_dev,
                           "Failed to allocate fence signaling event");
        goto failed_to_allocate_channel_event;
    }

    return nv_fence_context;

failed_to_allocate_channel_event:
    nv_drm_free(nv_fence_context);

failed_alloc_fence_context:

    nvKms->unmapMemory(nv_dev->pDevice,
                       pSemSurface,
                       NVKMS_KAPI_MAPPING_TYPE_KERNEL,
                       (void *) pLinearAddress);

failed_to_map_memory:
    nvKms->freeMemory(nv_dev->pDevice, pSemSurface);

failed:
    return NULL;
}

static void __nv_drm_fence_context_destroy(
    struct nv_drm_fence_context *nv_fence_context)
{
    struct nv_drm_device *nv_dev = nv_fence_context->nv_dev;

    /*
     * Free the channel event before destroying the fence context, otherwise
     * the event callback continues to get called.
     */
    nvKms->freeChannelEvent(nv_dev->pDevice, nv_fence_context->cb);

    /* Force signal all pending fences and empty pending list */
    spin_lock(&nv_fence_context->lock);

    nv_drm_gem_prime_force_fence_signal(nv_fence_context);

    spin_unlock(&nv_fence_context->lock);

    /* Free nvkms resources */

    nvKms->unmapMemory(nv_dev->pDevice,
                       nv_fence_context->pSemSurface,
                       NVKMS_KAPI_MAPPING_TYPE_KERNEL,
                       (void *) nv_fence_context->pLinearAddress);

    nvKms->freeMemory(nv_dev->pDevice, nv_fence_context->pSemSurface);

    nv_drm_free(nv_fence_context);
}

static nv_dma_fence_t *__nv_drm_fence_context_create_fence(
    struct nv_drm_fence_context *nv_fence_context,
    unsigned int seqno)
{
    struct nv_drm_prime_fence *nv_fence;
    int ret = 0;

    if ((nv_fence = nv_drm_calloc(1, sizeof(*nv_fence))) == NULL) {
        ret = -ENOMEM;
        goto out;
    }

    spin_lock(&nv_fence_context->lock);

    /*
     * If seqno wrapped, force signal fences to make sure none of them
     * get stuck.
     */
    if (seqno < nv_fence_context->last_seqno) {
        nv_drm_gem_prime_force_fence_signal(nv_fence_context);
    }

    INIT_LIST_HEAD(&nv_fence->list_entry);

    spin_lock_init(&nv_fence->lock);

    nv_dma_fence_init(&nv_fence->base, &nv_drm_gem_prime_fence_ops,
                      &nv_fence->lock, nv_fence_context->context,
                      seqno);

    list_add_tail(&nv_fence->list_entry, &nv_fence_context->pending);

    nv_fence_context->last_seqno = seqno;

    spin_unlock(&nv_fence_context->lock);

out:
    return ret != 0 ? ERR_PTR(ret) : &nv_fence->base;
}

int nv_drm_fence_supported_ioctl(struct drm_device *dev,
                                 void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    return nv_dev->pDevice ? 0 : -EINVAL;
}

struct nv_drm_gem_fence_context {
    struct nv_drm_gem_object base;
    struct nv_drm_fence_context *nv_fence_context;
};

static inline struct nv_drm_gem_fence_context *to_gem_fence_context(
    struct nv_drm_gem_object *nv_gem)
{
    if (nv_gem != NULL) {
        return container_of(nv_gem, struct nv_drm_gem_fence_context, base);
    }

    return NULL;
}

/*
 * Tear-down of the 'struct nv_drm_gem_fence_context' object is not expected
 * to happen from any worker thread; if it does, it causes a deadlock because
 * the tear-down sequence calls into code that flushes all existing worker
 * threads.
 */
static void __nv_drm_gem_fence_context_free(struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_gem_fence_context *nv_gem_fence_context =
        to_gem_fence_context(nv_gem);

    __nv_drm_fence_context_destroy(nv_gem_fence_context->nv_fence_context);

    nv_drm_free(nv_gem_fence_context);
}

const struct nv_drm_gem_object_funcs nv_gem_fence_context_ops = {
    .free = __nv_drm_gem_fence_context_free,
};

static inline
struct nv_drm_gem_fence_context *__nv_drm_gem_object_fence_context_lookup(
    struct drm_device *dev,
    struct drm_file *filp,
    u32 handle)
{
    struct nv_drm_gem_object *nv_gem =
        nv_drm_gem_object_lookup(dev, filp, handle);

    if (nv_gem != NULL && nv_gem->ops != &nv_gem_fence_context_ops) {
        nv_drm_gem_object_unreference_unlocked(nv_gem);
        return NULL;
    }

    return to_gem_fence_context(nv_gem);
}

int nv_drm_fence_context_create_ioctl(struct drm_device *dev,
                                      void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_fence_context_create_params *p = data;
    struct nv_drm_gem_fence_context *nv_gem_fence_context = NULL;

    if ((nv_gem_fence_context = nv_drm_calloc(
                1,
                sizeof(struct nv_drm_gem_fence_context))) == NULL) {
        goto done;
    }

    if ((nv_gem_fence_context->nv_fence_context =
            __nv_drm_fence_context_new(nv_dev, p)) == NULL) {
        goto fence_context_new_failed;
    }

    nv_drm_gem_object_init(nv_dev,
                           &nv_gem_fence_context->base,
                           &nv_gem_fence_context_ops,
                           0 /* size */,
                           NULL /* pMemory */);

    return nv_drm_gem_handle_create_drop_reference(filep,
                                                   &nv_gem_fence_context->base,
                                                   &p->handle);

fence_context_new_failed:
    nv_drm_free(nv_gem_fence_context);

done:
    return -ENOMEM;
}

int nv_drm_gem_fence_attach_ioctl(struct drm_device *dev,
                                  void *data, struct drm_file *filep)
{
    int ret = -EINVAL;
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_gem_fence_attach_params *p = data;

    struct nv_drm_gem_object *nv_gem;
    struct nv_drm_gem_fence_context *nv_gem_fence_context;

    nv_dma_fence_t *fence;

    nv_gem = nv_drm_gem_object_lookup(nv_dev->dev, filep, p->handle);

    if (!nv_gem) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lookup gem object for fence attach: 0x%08x",
            p->handle);

        goto done;
    }

    if ((nv_gem_fence_context = __nv_drm_gem_object_fence_context_lookup(
                nv_dev->dev,
                filep,
                p->fence_context_handle)) == NULL) {

        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lookup gem object for fence context: 0x%08x",
            p->fence_context_handle);

        goto fence_context_lookup_failed;
    }

    if (IS_ERR(fence = __nv_drm_fence_context_create_fence(
                   nv_gem_fence_context->nv_fence_context,
                   p->sem_thresh))) {
        ret = PTR_ERR(fence);

        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to allocate fence: 0x%08x", p->handle);

        goto fence_context_create_fence_failed;
    }

    nv_dma_resv_lock(&nv_gem->resv, NULL);

    ret = nv_dma_resv_reserve_fences(&nv_gem->resv, 1, false);
    if (ret == 0) {
        nv_dma_resv_add_excl_fence(&nv_gem->resv, fence);
    } else {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to reserve fence. Error code: %d", ret);
    }

    nv_dma_resv_unlock(&nv_gem->resv);

fence_context_create_fence_failed:
    nv_drm_gem_object_unreference_unlocked(&nv_gem_fence_context->base);

fence_context_lookup_failed:
    nv_drm_gem_object_unreference_unlocked(nv_gem);

done:
    return ret;
}

#endif /* NV_DRM_FENCE_AVAILABLE */

#endif /* NV_DRM_AVAILABLE */

@@ -46,12 +46,33 @@
#define NV_DRM_LOG_ERR(__fmt, ...) \
    DRM_ERROR("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)

/*
 * DRM_WARN() was added in v4.9 by kernel commit
 * 30b0da8d556e65ff935a56cd82c05ba0516d3e4a
 *
 * Before this commit, only DRM_INFO and DRM_ERROR were defined and
 * DRM_INFO(fmt, ...) was defined as
 * printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__). So, if
 * DRM_WARN is undefined this defines NV_DRM_LOG_WARN following the
 * same pattern as DRM_INFO.
 */
#ifdef DRM_WARN
#define NV_DRM_LOG_WARN(__fmt, ...) \
    DRM_WARN("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#else
#define NV_DRM_LOG_WARN(__fmt, ...) \
    printk(KERN_WARNING "[" DRM_NAME "] [nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#endif

#define NV_DRM_LOG_INFO(__fmt, ...) \
    DRM_INFO("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)

#define NV_DRM_DEV_LOG_INFO(__dev, __fmt, ...) \
    NV_DRM_LOG_INFO("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)

#define NV_DRM_DEV_LOG_WARN(__dev, __fmt, ...) \
    NV_DRM_LOG_WARN("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)

#define NV_DRM_DEV_LOG_ERR(__dev, __fmt, ...) \
    NV_DRM_LOG_ERR("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)

@@ -105,6 +126,7 @@ struct nv_drm_device {
    NvU64 modifiers[6 /* block linear */ + 1 /* linear */ + 1 /* terminator */];
#endif

    struct delayed_work hotplug_event_work;
    atomic_t enable_event_handling;

    /**
@@ -117,9 +139,26 @@ struct nv_drm_device {

#endif

#if defined(NV_DRM_FENCE_AVAILABLE)
    NvU64 semsurf_stride;
    NvU64 semsurf_max_submitted_offset;
#endif

    NvBool hasVideoMemory;

    NvBool supportsSyncpts;
    NvBool subOwnershipGranted;
    NvBool hasFramebufferConsole;

    /**
     * @drmMasterChangedSinceLastAtomicCommit:
     *
     * This flag is set in nv_drm_master_set and reset after a completed atomic
     * commit. It is used to restore or recommit state that is lost by the
     * NvKms modeset owner change, such as the CRTC color management
     * properties.
     */
    NvBool drmMasterChangedSinceLastAtomicCommit;

    struct drm_property *nv_out_fence_property;
    struct drm_property *nv_input_colorspace_property;

kernel-open/nvidia-drm/nvidia-drm-sources.mk (new file, 134 lines)
@@ -0,0 +1,134 @@
###########################################################################
# Kbuild fragment for nvidia-drm.ko
###########################################################################

#
# Define NVIDIA_DRM_SOURCES
#

NVIDIA_DRM_SOURCES =
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-drv.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-utils.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-crtc.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-encoder.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-connector.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fb.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fence.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-kthread-q.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-dma-buf.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-format.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-os-interface.c

#
# Register the conftests needed by nvidia-drm.ko
#

NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_handle_to_fd

NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_lookup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_state_ref_counting
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_connector_dpms
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_has_vrr_capable_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_framebuffer_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_put
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_format_num_planes
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sync_file_get_fence
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_generic_setup
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_ttm_setup
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_attach_hdr_output_metadata_property
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_helper_crtc_enable_color_mgmt
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_crtc_enable_color_mgmt
|
||||
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_legacy_gamma_set
|
||||
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_irq
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_name
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_device_list
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_set_busid
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_connectors_changed
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_init_function_args
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_helper_mode_fill_fb_struct
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_drop_has_from_release_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_unload_has_int_return_type
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_crtc_destroy_state_has_crtc_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_plane_destroy_state_has_plane_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_object_find_has_file_priv_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_buf_owner
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_list_iter
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_swap_state_has_stall_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_vrr_enabled
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_modifiers_present
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_node_is_allowed_has_tag_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_free_object
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_callbacks
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_atomic_check_has_atomic_state_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_vmap_has_map_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_plane_atomic_check_has_atomic_state_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_device_has_pdev
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_no_vblank
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_config_has_allow_fb_modifiers
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_has_hdr_output_metadata
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_has_leases
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_file_get_master
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_modeset_lock_all_end
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_lookup
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_put
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += fence_ops_use_64bit_seqno
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers_has_driver_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_unlocked_ioctl_flag_present
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed
|
||||
NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations_fop_unsigned_offset_present
|
||||
@@ -2,29 +2,16 @@
 # Kbuild fragment for nvidia-drm.ko
 ###########################################################################

+# Get our source file list and conftest list from the common file
+include $(src)/nvidia-drm/nvidia-drm-sources.mk
+
+# Linux-specific sources
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c
+
-#
-# Define NVIDIA_DRM_{SOURCES,OBJECTS}
-#
-
-NVIDIA_DRM_SOURCES =
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-drv.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-utils.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-crtc.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-encoder.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-connector.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fb.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-prime-fence.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-dma-buf.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-format.c

 NVIDIA_DRM_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_DRM_SOURCES))

 obj-m += nvidia-drm.o
@@ -43,83 +30,4 @@ NVIDIA_DRM_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0

 $(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_DRM_OBJECTS), $(NVIDIA_DRM_CFLAGS))

 #
 # Register the conftests needed by nvidia-drm.ko
 #

 NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_DRM_OBJECTS)

-NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
-NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
-NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
-NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
-NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
-
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages_remote
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_lookup
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_state_ref_counting
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_connector_dpms
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_has_vrr_capable_property
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_framebuffer_get
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_put
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_format_num_planes
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
-
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_irq
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_name
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_device_list
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_set_busid
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_connectors_changed
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_init_function_args
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_helper_mode_fill_fb_struct
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_drop_has_from_release_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_unload_has_int_return_type
-NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
-NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_crtc_destroy_state_has_crtc_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_plane_destroy_state_has_plane_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_object_find_has_file_priv_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += dma_buf_owner
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_list_iter
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_swap_state_has_stall_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present
-NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_vrr_enabled
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_modifiers_present
-NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_node_is_allowed_has_tag_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_free_object
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_callbacks
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_atomic_check_has_atomic_state_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_vmap_has_map_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_plane_atomic_check_has_atomic_state_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_device_has_pdev
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_no_vblank
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_config_has_allow_fb_modifiers
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_has_hdr_output_metadata
-NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence
-NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences
-NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg
@@ -45,6 +45,7 @@ int nv_drm_init(void)
         return -EINVAL;
     }

+    nvKms->setSuspendResumeCallback(nv_drm_suspend_resume);
     return nv_drm_probe_devices();
 #else
     return 0;
@@ -54,6 +55,7 @@ int nv_drm_init(void)
 void nv_drm_exit(void)
 {
 #if defined(NV_DRM_AVAILABLE)
+    nvKms->setSuspendResumeCallback(NULL);
     nv_drm_remove_devices();
 #endif
 }
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -176,7 +176,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
 {
     unsigned i, j;
-    const static unsigned attempts = 3;
+    static const unsigned attempts = 3;
     struct task_struct *thread[3];

     for (i = 0;; i++) {
@@ -201,7 +201,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),

         // Ran out of attempts - return thread even if its stack may not be
         // allocated on the preferred node
-        if ((i == (attempts - 1)))
+        if (i == (attempts - 1))
             break;

         // Get the NUMA node where the first page of the stack is resident. If
@@ -247,6 +247,11 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferr
     return 0;
 }

+int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
+{
+    return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
+}
+
 // Returns true (non-zero) if the item was actually scheduled, and false if the
 // item was already pending in a queue.
 static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
@@ -301,7 +306,7 @@ static void _q_flush_function(void *args)
 static void _raw_q_flush(nv_kthread_q_t *q)
 {
     nv_kthread_q_item_t q_item;
-    DECLARE_COMPLETION(completion);
+    DECLARE_COMPLETION_ONSTACK(completion);

     nv_kthread_q_item_init(&q_item, _q_flush_function, &completion);
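The DECLARE_COMPLETION_ONSTACK() change matters because the completion in _raw_q_flush() lives on the stack: the ONSTACK variant tells lockdep the object has a short, stack-bound lifetime. A self-contained sketch of the same flush pattern with ordinary workqueues (my illustration, not the driver's code):

    #include <linux/completion.h>
    #include <linux/workqueue.h>

    struct flush_item {
        struct work_struct work;
        struct completion *done;
    };

    static void flush_fn(struct work_struct *work)
    {
        struct flush_item *item = container_of(work, struct flush_item, work);
        complete(item->done);
    }

    /* Wait until every item queued on wq before this call has run. */
    static void example_flush(struct workqueue_struct *wq)
    {
        DECLARE_COMPLETION_ONSTACK(done);  /* on-stack: ONSTACK variant required */
        struct flush_item item = { .done = &done };

        INIT_WORK_ONSTACK(&item.work, flush_fn);
        queue_work(wq, &item.work);
        wait_for_completion(&done);
        destroy_work_on_stack(&item.work);
    }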
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2015-21 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -35,12 +35,13 @@
 #include <linux/list.h>
 #include <linux/rwsem.h>
 #include <linux/freezer.h>
 #include <linux/poll.h>
+#include <linux/cdev.h>

 #include <acpi/video.h>

 #include "nvstatus.h"

 #include "nv-register-module.h"
 #include "nv-modeset-interface.h"
 #include "nv-kref.h"

@@ -53,8 +54,13 @@
 #include "nv-kthread-q.h"
 #include "nv-time.h"
 #include "nv-lock.h"
+#include "nv-chardev-numbers.h"

-#if !defined(CONFIG_RETPOLINE)
+/*
+ * Commit aefb2f2e619b ("x86/bugs: Rename CONFIG_RETPOLINE =>
+ * CONFIG_MITIGATION_RETPOLINE) in v6.8 renamed CONFIG_RETPOLINE.
+ */
+#if !defined(CONFIG_RETPOLINE) && !defined(CONFIG_MITIGATION_RETPOLINE)
 #include "nv-retpoline.h"
 #endif

@@ -65,6 +71,21 @@
 static bool output_rounding_fix = true;
 module_param_named(output_rounding_fix, output_rounding_fix, bool, 0400);

+static bool disable_hdmi_frl = false;
+module_param_named(disable_hdmi_frl, disable_hdmi_frl, bool, 0400);
+
+static bool disable_vrr_memclk_switch = false;
+module_param_named(disable_vrr_memclk_switch, disable_vrr_memclk_switch, bool, 0400);
+
+static bool hdmi_deepcolor = false;
+module_param_named(hdmi_deepcolor, hdmi_deepcolor, bool, 0400);
+
+static bool vblank_sem_control = false;
+module_param_named(vblank_sem_control, vblank_sem_control, bool, 0400);
+
+static bool opportunistic_display_sync = true;
+module_param_named(opportunistic_display_sync, opportunistic_display_sync, bool, 0400);
+
 /* These parameters are used for fault injection tests. Normally the defaults
  * should be used. */
 MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc");
@@ -75,6 +96,17 @@ MODULE_PARM_DESC(malloc_verbose, "Report information about malloc calls on modul
 static bool malloc_verbose = false;
 module_param_named(malloc_verbose, malloc_verbose, bool, 0400);

+#if NVKMS_CONFIG_FILE_SUPPORTED
+/* This parameter is used to find the dpy override conf file */
+#define NVKMS_CONF_FILE_SPECIFIED (nvkms_conf != NULL)
+
+MODULE_PARM_DESC(config_file,
+                 "Path to the nvidia-modeset configuration file "
+                 "(default: disabled)");
+static char *nvkms_conf = NULL;
+module_param_named(config_file, nvkms_conf, charp, 0400);
+#endif
+
 static atomic_t nvkms_alloc_called_count;

 NvBool nvkms_output_rounding_fix(void)
@@ -82,6 +114,31 @@ NvBool nvkms_output_rounding_fix(void)
     return output_rounding_fix;
 }

+NvBool nvkms_disable_hdmi_frl(void)
+{
+    return disable_hdmi_frl;
+}
+
+NvBool nvkms_disable_vrr_memclk_switch(void)
+{
+    return disable_vrr_memclk_switch;
+}
+
+NvBool nvkms_hdmi_deepcolor(void)
+{
+    return hdmi_deepcolor;
+}
+
+NvBool nvkms_vblank_sem_control(void)
+{
+    return vblank_sem_control;
+}
+
+NvBool nvkms_opportunistic_display_sync(void)
+{
+    return opportunistic_display_sync;
+}
+
 #define NVKMS_SYNCPT_STUBS_NEEDED

 /*************************************************************************
@@ -183,9 +240,23 @@ static inline int nvkms_read_trylock_pm_lock(void)

 static inline void nvkms_read_lock_pm_lock(void)
 {
-    while (!down_read_trylock(&nvkms_pm_lock)) {
-        try_to_freeze();
-        cond_resched();
+    if ((current->flags & PF_NOFREEZE)) {
+        /*
+         * Non-freezable tasks (i.e. kthreads in this case) don't have to worry
+         * about being frozen during system suspend, but do need to block so
+         * that the CPU can go idle during s2idle. Do a normal uninterruptible
+         * blocking wait for the PM lock.
+         */
+        down_read(&nvkms_pm_lock);
+    } else {
+        /*
+         * For freezable tasks, make sure we give the kernel an opportunity to
+         * freeze if taking the PM lock fails.
+         */
+        while (!down_read_trylock(&nvkms_pm_lock)) {
+            try_to_freeze();
+            cond_resched();
+        }
     }
 }
@@ -318,7 +389,7 @@ NvU64 nvkms_get_usec(void)
     struct timespec64 ts;
     NvU64 ns;

-    ktime_get_real_ts64(&ts);
+    ktime_get_raw_ts64(&ts);

     ns = timespec64_to_ns(&ts);
     return ns / 1000;
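The switch from ktime_get_real_ts64() to ktime_get_raw_ts64() moves this counter off the wall clock and onto the raw monotonic clock, so the microsecond value can no longer jump when NTP or the user adjusts system time. A standalone sketch of the idea (my illustration, not driver code):

    #include <linux/types.h>
    #include <linux/timekeeping.h>

    /* Raw monotonic microseconds: immune to settimeofday()/NTP steps,
     * unlike ktime_get_real_ts64(), which follows the wall clock. */
    static u64 example_monotonic_usec(void)
    {
        struct timespec64 ts;

        ktime_get_raw_ts64(&ts);
        return timespec64_to_ns(&ts) / 1000;
    }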
@@ -432,6 +503,8 @@ nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel,

 static void nvkms_suspend(NvU32 gpuId)
 {
+    nvKmsKapiSuspendResume(NV_TRUE /* suspend */);
+
     if (gpuId == 0) {
         nvkms_write_lock_pm_lock();
     }
@@ -450,6 +523,8 @@ static void nvkms_resume(NvU32 gpuId)
     if (gpuId == 0) {
         nvkms_write_unlock_pm_lock();
     }
+
+    nvKmsKapiSuspendResume(NV_FALSE /* suspend */);
 }

@@ -778,49 +853,6 @@ void nvkms_free_timer(nvkms_timer_handle_t *handle)
     timer->cancel = NV_TRUE;
 }

-void* nvkms_get_per_open_data(int fd)
-{
-    struct file *filp = fget(fd);
-    struct nvkms_per_open *popen = NULL;
-    dev_t rdev = 0;
-    void *data = NULL;
-
-    if (filp == NULL) {
-        return NULL;
-    }
-
-    if (filp->f_inode == NULL) {
-        goto done;
-    }
-    rdev = filp->f_inode->i_rdev;
-
-    if ((MAJOR(rdev) != NVKMS_MAJOR_DEVICE_NUMBER) ||
-        (MINOR(rdev) != NVKMS_MINOR_DEVICE_NUMBER)) {
-        goto done;
-    }
-
-    popen = filp->private_data;
-    if (popen == NULL) {
-        goto done;
-    }
-
-    data = popen->data;
-
-done:
-    /*
-     * fget() incremented the struct file's reference count, which
-     * needs to be balanced with a call to fput(). It is safe to
-     * decrement the reference count before returning
-     * filp->private_data because core NVKMS is currently holding the
-     * nvkms_lock, which prevents the nvkms_close() => nvKmsClose()
-     * call chain from freeing the file out from under the caller of
-     * nvkms_get_per_open_data().
-     */
-    fput(filp);
-
-    return data;
-}
-
 NvBool nvkms_fd_is_nvidia_chardev(int fd)
 {
     struct file *filp = fget(fd);
@@ -1038,7 +1070,7 @@ static void nvkms_kapi_event_kthread_q_callback(void *arg)
     nvKmsKapiHandleEventQueueChange(device);
 }

-struct nvkms_per_open *nvkms_open_common(enum NvKmsClientType type,
+static struct nvkms_per_open *nvkms_open_common(enum NvKmsClientType type,
                                          struct NvKmsKapiDevice *device,
                                          int *status)
 {
@@ -1090,7 +1122,7 @@ failed:
     return NULL;
 }

-void nvkms_close_pm_locked(struct nvkms_per_open *popen)
+static void nvkms_close_pm_locked(struct nvkms_per_open *popen)
 {
     /*
      * Don't use down_interruptible(): we need to free resources
@@ -1153,7 +1185,7 @@ static void nvkms_close_popen(struct nvkms_per_open *popen)
     }
 }

-int nvkms_ioctl_common
+static int nvkms_ioctl_common
 (
     struct nvkms_per_open *popen,
     NvU32 cmd, NvU64 address, const size_t size
@@ -1362,6 +1394,123 @@ static void nvkms_proc_exit(void)
 #endif /* CONFIG_PROC_FS */
 }

+/*************************************************************************
+ * NVKMS Config File Read
+ ************************************************************************/
+#if NVKMS_CONFIG_FILE_SUPPORTED
+static NvBool nvkms_fs_mounted(void)
+{
+    return current->fs != NULL;
+}
+
+static size_t nvkms_config_file_open
+(
+    char *fname,
+    char ** const buff
+)
+{
+    int i = 0;
+    struct file *file;
+    struct inode *file_inode;
+    size_t file_size = 0;
+    size_t read_size = 0;
+#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
+    loff_t pos = 0;
+#endif
+
+    if (!nvkms_fs_mounted()) {
+        printk(KERN_ERR NVKMS_LOG_PREFIX "ERROR: Filesystems not mounted\n");
+        return 0;
+    }
+
+    file = filp_open(fname, O_RDONLY, 0);
+    if (file == NULL || IS_ERR(file)) {
+        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Failed to open %s\n",
+               fname);
+        return 0;
+    }
+
+    file_inode = file->f_inode;
+    if (file_inode == NULL || IS_ERR(file_inode)) {
+        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Inode is invalid\n");
+        goto done;
+    }
+    file_size = file_inode->i_size;
+    if (file_size > NVKMS_READ_FILE_MAX_SIZE) {
+        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: File exceeds maximum size\n");
+        goto done;
+    }
+
+    *buff = nvkms_alloc(file_size, NV_FALSE);
+    if (*buff == NULL) {
+        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Out of memory\n");
+        goto done;
+    }
+
+    /*
+     * TODO: Once we have access to GPL symbols, this can be replaced with
+     * kernel_read_file for kernels >= 4.6
+     */
+    while ((read_size < file_size) && (i++ < NVKMS_READ_FILE_MAX_LOOPS)) {
+#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
+        ssize_t ret = kernel_read(file, *buff + read_size,
+                                  file_size - read_size, &pos);
+#else
+        ssize_t ret = kernel_read(file, read_size,
+                                  *buff + read_size,
+                                  file_size - read_size);
+#endif
+        if (ret <= 0) {
+            break;
+        }
+        read_size += ret;
+    }
+
+    if (read_size != file_size) {
+        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Failed to read %s\n",
+               fname);
+        goto done;
+    }
+
+    filp_close(file, current->files);
+    return file_size;
+
+done:
+    nvkms_free(*buff, file_size);
+    filp_close(file, current->files);
+    return 0;
+}
+
+/* must be called with nvkms_lock locked */
+static void nvkms_read_config_file_locked(void)
+{
+    char *buffer = NULL;
+    size_t buf_size = 0;
+
+    /* only read the config file if the kernel parameter is set */
+    if (!NVKMS_CONF_FILE_SPECIFIED) {
+        return;
+    }
+
+    buf_size = nvkms_config_file_open(nvkms_conf, &buffer);
+
+    if (buf_size == 0) {
+        return;
+    }
+
+    if (nvKmsReadConf(buffer, buf_size, nvkms_config_file_open)) {
+        printk(KERN_INFO NVKMS_LOG_PREFIX "Successfully read %s\n",
+               nvkms_conf);
+    }
+
+    nvkms_free(buffer, buf_size);
+}
+#else
+static void nvkms_read_config_file_locked(void)
+{
+}
+#endif
+
 /*************************************************************************
  * NVKMS KAPI functions
  ************************************************************************/
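The TODO in the new read loop notes that the whole thing could collapse to one GPL-only helper call. If I read the current kernel API right (this is an assumption on my part, not code from this change; the helper exists in this form on kernels >= 5.10), the replacement might look like:

    #include <linux/kernel_read_file.h>

    /* Hypothetical GPL-symbol variant of the read loop above:
     * kernel_read_file_from_path() allocates the buffer (vmalloc) and
     * enforces the size cap in one call. Returns bytes read or -errno;
     * the caller must vfree(*buf) when done. */
    static ssize_t example_read_config(const char *path, void **buf)
    {
        size_t file_size = 0;

        return kernel_read_file_from_path(path, 0, buf,
                                          NVKMS_READ_FILE_MAX_SIZE,
                                          &file_size, READING_UNKNOWN);
    }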
@@ -1455,6 +1604,12 @@ static int nvkms_ioctl(struct inode *inode, struct file *filp,
     return status;
 }

+static long nvkms_unlocked_ioctl(struct file *filp, unsigned int cmd,
+                                 unsigned long arg)
+{
+    return nvkms_ioctl(filp->f_inode, filp, cmd, arg);
+}
+
 static unsigned int nvkms_poll(struct file *filp, poll_table *wait)
 {
     unsigned int mask = 0;
@@ -1482,17 +1637,73 @@
  * Module loading support code.
  *************************************************************************/

-static nvidia_module_t nvidia_modeset_module = {
+#define NVKMS_RDEV (MKDEV(NV_MAJOR_DEVICE_NUMBER, \
+                          NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE))
+
+static struct file_operations nvkms_fops = {
     .owner = THIS_MODULE,
-    .module_name = "nvidia-modeset",
-    .instance = 1, /* minor number: 255-1=254 */
-    .open = nvkms_open,
-    .close = nvkms_close,
-    .mmap = nvkms_mmap,
-    .ioctl = nvkms_ioctl,
     .poll = nvkms_poll,
+    .unlocked_ioctl = nvkms_unlocked_ioctl,
+#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
+    .compat_ioctl = nvkms_unlocked_ioctl,
+#endif
+    .mmap = nvkms_mmap,
+    .open = nvkms_open,
+    .release = nvkms_close,
 };

+static struct cdev nvkms_device_cdev;
+
+static int __init nvkms_register_chrdev(void)
+{
+    int ret;
+
+    ret = register_chrdev_region(NVKMS_RDEV, 1, "nvidia-modeset");
+    if (ret < 0) {
+        return ret;
+    }
+
+    cdev_init(&nvkms_device_cdev, &nvkms_fops);
+    ret = cdev_add(&nvkms_device_cdev, NVKMS_RDEV, 1);
+    if (ret < 0) {
+        unregister_chrdev_region(NVKMS_RDEV, 1);
+        return ret;
+    }
+
+    return ret;
+}
+
+static void nvkms_unregister_chrdev(void)
+{
+    cdev_del(&nvkms_device_cdev);
+    unregister_chrdev_region(NVKMS_RDEV, 1);
+}
+
+void* nvkms_get_per_open_data(int fd)
+{
+    struct file *filp = fget(fd);
+    void *data = NULL;
+
+    if (filp) {
+        if (filp->f_op == &nvkms_fops && filp->private_data) {
+            struct nvkms_per_open *popen = filp->private_data;
+            data = popen->data;
+        }
+
+        /*
+         * fget() incremented the struct file's reference count, which needs to
+         * be balanced with a call to fput(). It is safe to decrement the
+         * reference count before returning filp->private_data because core
+         * NVKMS is currently holding the nvkms_lock, which prevents the
+         * nvkms_close() => nvKmsClose() call chain from freeing the file out
+         * from under the caller of nvkms_get_per_open_data().
+         */
+        fput(filp);
+    }
+
+    return data;
+}
+
 static int __init nvkms_init(void)
 {
     int ret;
@@ -1523,28 +1734,29 @@ static int __init nvkms_init(void)
     INIT_LIST_HEAD(&nvkms_timers.list);
     spin_lock_init(&nvkms_timers.lock);

-    ret = nvidia_register_module(&nvidia_modeset_module);
-
+    ret = nvkms_register_chrdev();
     if (ret != 0) {
-        goto fail_register_module;
+        goto fail_register_chrdev;
     }

     down(&nvkms_lock);
     if (!nvKmsModuleLoad()) {
         ret = -ENOMEM;
     }
-    up(&nvkms_lock);
     if (ret != 0) {
+        up(&nvkms_lock);
         goto fail_module_load;
     }
+    nvkms_read_config_file_locked();
+    up(&nvkms_lock);

     nvkms_proc_init();

     return 0;

 fail_module_load:
-    nvidia_unregister_module(&nvidia_modeset_module);
-fail_register_module:
+    nvkms_unregister_chrdev();
+fail_register_chrdev:
     nv_kthread_q_stop(&nvkms_deferred_close_kthread_q);
 fail_deferred_close_kthread:
     nv_kthread_q_stop(&nvkms_kthread_q);
@@ -1608,7 +1820,7 @@ restart:
     nv_kthread_q_stop(&nvkms_deferred_close_kthread_q);
     nv_kthread_q_stop(&nvkms_kthread_q);

-    nvidia_unregister_module(&nvidia_modeset_module);
+    nvkms_unregister_chrdev();
     nvkms_free_rm();

     if (malloc_verbose) {
@@ -40,17 +40,31 @@
 #include "nv_stdarg.h"

 enum NvKmsSyncPtOp {
+    /*
+     * Call into Tegra's kernel nvhost driver, and allocate a syncpoint that can
+     * be exclusively used by the caller. Internally, this operation will call
+     * get() to set the initial refcount of the syncpoint to 1.
+     */
     NVKMS_SYNCPT_OP_ALLOC,
-    NVKMS_SYNCPT_OP_GET,
+    /*
+     * Decrease the refcount of an already allocated syncpoint. Once the
+     * refcount drops to 0, the syncpoint will be returned to the free pool that
+     * nvhost manages, so PUT can also be used to balance out an ALLOC.
+     */
     NVKMS_SYNCPT_OP_PUT,
-    NVKMS_SYNCPT_OP_INCR_MAX,
-    NVKMS_SYNCPT_OP_CPU_INCR,
+    /*
+     * Extract syncpt id and thresh from the sync-file file descriptor
+     */
     NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH,
+    /*
+     * Create dma-fence from syncpt id and thresh value and create sync_file
+     * file descriptor for the dma-fence handle created.
+     */
     NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD,
+    /*
+     * read syncpt minimum value of given syncpt
+     */
     NVKMS_SYNCPT_OP_READ_MINVAL,
-    NVKMS_SYNCPT_OP_READ_MAXVAL,
-    NVKMS_SYNCPT_OP_SET_MIN_EQ_MAX,
-    NVKMS_SYNCPT_OP_SET_MAXVAL,
 };

 typedef struct {
@@ -60,24 +74,10 @@ typedef struct {
         NvU32 id; /* out */
     } alloc;

-    struct {
-        NvU32 id; /* in */
-    } get;
-
     struct {
         NvU32 id; /* in */
     } put;

-    struct {
-        NvU32 id;    /* in */
-        NvU32 incr;  /* in */
-        NvU32 value; /* out */
-    } incr_max;
-
-    struct {
-        NvU32 id; /* in */
-    } cpu_incr;
-
     struct {
         NvS32 fd; /* in */
         NvU32 id; /* out */
@@ -94,23 +94,14 @@ typedef struct {
         NvU32 id;     /* in */
         NvU32 minval; /* out */
     } read_minval;

-    struct {
-        NvU32 id;     /* in */
-        NvU32 maxval; /* out */
-    } read_maxval;
-
-    struct {
-        NvU32 id; /* in */
-    } set_min_eq_max;
-
-    struct {
-        NvU32 id;  /* in */
-        NvU32 val; /* in */
-    } set_maxval;
 } NvKmsSyncPtOpParams;

 NvBool nvkms_output_rounding_fix(void);
+NvBool nvkms_disable_hdmi_frl(void);
+NvBool nvkms_disable_vrr_memclk_switch(void);
+NvBool nvkms_hdmi_deepcolor(void);
+NvBool nvkms_vblank_sem_control(void);
+NvBool nvkms_opportunistic_display_sync(void);

 void nvkms_call_rm   (void *ops);
 void* nvkms_alloc    (size_t size,
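For orientation, a rough sketch of how a caller might drive the remaining ops through NvKmsSyncPtOpParams, keeping an ALLOC balanced by a PUT as the new comments describe. The dispatcher named here (nvkms_call_syncpt_op) is hypothetical; the real entry point is not part of this diff, and only the `id` fields shown above are used:

    /* Hypothetical dispatcher; the real syncpt entry point is not shown
     * in this diff. */
    extern NvBool nvkms_call_syncpt_op(enum NvKmsSyncPtOp op,
                                       NvKmsSyncPtOpParams *params);

    static NvBool example_alloc_then_put(void)
    {
        NvKmsSyncPtOpParams params = { };
        NvU32 id;

        /* ALLOC leaves the new syncpoint with a refcount of 1 ... */
        if (!nvkms_call_syncpt_op(NVKMS_SYNCPT_OP_ALLOC, &params)) {
            return NV_FALSE;
        }
        id = params.alloc.id;   /* out */

        /* ... so a matching PUT releases it back to nvhost's free pool. */
        params.put.id = id;     /* in */
        return nvkms_call_syncpt_op(NVKMS_SYNCPT_OP_PUT, &params);
    }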
@@ -40,9 +40,6 @@ NV_KERNEL_MODULE_TARGETS += $(NVIDIA_MODESET_KO)
 NVIDIA_MODESET_BINARY_OBJECT := $(src)/nvidia-modeset/nv-modeset-kernel.o_binary
 NVIDIA_MODESET_BINARY_OBJECT_O := nvidia-modeset/nv-modeset-kernel.o

-quiet_cmd_symlink = SYMLINK $@
- cmd_symlink = ln -sf $< $@
-
 targets += $(NVIDIA_MODESET_BINARY_OBJECT_O)

 $(obj)/$(NVIDIA_MODESET_BINARY_OBJECT_O): $(NVIDIA_MODESET_BINARY_OBJECT) FORCE
@@ -58,6 +55,18 @@ nvidia-modeset-y += $(NVIDIA_MODESET_BINARY_OBJECT_O)
 NVIDIA_MODESET_CFLAGS += -I$(src)/nvidia-modeset
 NVIDIA_MODESET_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0

+# Some Android kernels prohibit driver use of filesystem functions like
+# filp_open() and kernel_read(). Disable the NVKMS_CONFIG_FILE_SUPPORTED
+# functionality that uses those functions when building for Android.
+
+PLATFORM_IS_ANDROID ?= 0
+
+ifeq ($(PLATFORM_IS_ANDROID),1)
+  NVIDIA_MODESET_CFLAGS += -DNVKMS_CONFIG_FILE_SUPPORTED=0
+else
+  NVIDIA_MODESET_CFLAGS += -DNVKMS_CONFIG_FILE_SUPPORTED=1
+endif
+
 $(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_MODESET_OBJECTS), $(NVIDIA_MODESET_CFLAGS))
@@ -42,6 +42,20 @@ typedef void nvkms_procfs_proc_t(void *data,
                                  char *buffer, size_t size,
                                  nvkms_procfs_out_string_func_t *outString);

+/* max number of loops to prevent hanging the kernel if an edge case is hit */
+#define NVKMS_READ_FILE_MAX_LOOPS 1000
+/* max size for any file read by the config system */
+#define NVKMS_READ_FILE_MAX_SIZE 8192
+
+/*
+ * The read file callback should allocate a buffer pointed to by *buff, fill it
+ * with the contents of fname, and return the size of the buffer. Buffer is not
+ * guaranteed to be null-terminated. The caller is responsible for freeing the
+ * buffer with nvkms_free, not nvFree.
+ */
+typedef size_t nvkms_config_read_file_func_t(char *fname,
+                                             char ** const buff);
+
 typedef struct {
     const char *name;
     nvkms_procfs_proc_t *func;
@@ -52,6 +66,8 @@ enum NvKmsClientType {
     NVKMS_CLIENT_KERNEL_SPACE,
 };

+struct NvKmsPerOpenDev;
+
 NvBool nvKmsIoctl(
     void *pOpenVoid,
     NvU32 cmd,
@@ -74,6 +90,9 @@ void nvKmsResume(NvU32 gpuId);

 void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles);

+NvBool nvKmsReadConf(const char *buff, size_t size,
+                     nvkms_config_read_file_func_t readfile);
+
 void nvKmsKapiHandleEventQueueChange
 (
     struct NvKmsKapiDevice *device
@@ -84,7 +103,11 @@ NvBool nvKmsKapiGetFunctionsTableInternal
     struct NvKmsKapiFunctionsTable *funcsTable
 );

+void nvKmsKapiSuspendResume(NvBool suspend);
+
 NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness);
 NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness);

+NvBool nvKmsOpenDevHasSubOwnerPermissionOrBetter(const struct NvKmsPerOpenDev *pOpenDev);
+
 #endif /* __NV_KMS_H__ */
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2011-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -94,11 +94,10 @@ struct nvidia_p2p_params {
 } nvidia_p2p_params_t;

 /*
- * Capability flag for users to detect
+ * Macro for users to detect
  * driver support for persistent pages.
  */
-extern int nvidia_p2p_cap_persistent_pages;
-#define NVIDIA_P2P_CAP_PERSISTENT_PAGES
+#define NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API

 /*
  * This API is not supported.
@@ -173,11 +172,6 @@
 *   A pointer to the function to be invoked when the pages
 *   underlying the virtual address range are freed
 *   implicitly.
-*   If NULL, persistent pages will be returned.
-*   This means the pages underlying the range of GPU virtual memory
-*   will persist until explicitly freed by nvidia_p2p_put_pages().
-*   Persistent GPU memory mappings are not supported on PowerPC,
-*   MIG-enabled devices and vGPU.
 * @param[in] data
 *   A non-NULL opaque pointer to private data to be passed to the
 *   callback function.
@@ -190,12 +184,48 @@
 *   insufficient resources were available to complete the operation.
 *   -EIO if an unknown error occurred.
 */
-int nvidia_p2p_get_pages(uint64_t p2p_token, uint32_t va_space,
-        uint64_t virtual_address,
-        uint64_t length,
-        struct nvidia_p2p_page_table **page_table,
-        void (*free_callback)(void *data),
-        void *data);
+int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space,
+        uint64_t virtual_address, uint64_t length,
+        struct nvidia_p2p_page_table **page_table,
+        void (*free_callback)(void *data), void *data);
+
+/*
+ * @brief
+ *   Pin and make the pages underlying a range of GPU virtual memory
+ *   accessible to a third-party device. The pages will persist until
+ *   explicitly freed by nvidia_p2p_put_pages_persistent().
+ *
+ *   Persistent GPU memory mappings are not supported on PowerPC,
+ *   MIG-enabled devices and vGPU.
+ *
+ *   This API only supports pinned, GPU-resident memory, such as that provided
+ *   by cudaMalloc().
+ *
+ *   This API may sleep.
+ *
+ * @param[in] virtual_address
+ *   The start address in the specified virtual address space.
+ *   Address must be aligned to the 64KB boundary.
+ * @param[in] length
+ *   The length of the requested P2P mapping.
+ *   Length must be a multiple of 64KB.
+ * @param[out] page_table
+ *   A pointer to an array of structures with P2P PTEs.
+ * @param[in] flags
+ *   Must be set to zero for now.
+ *
+ * @return
+ *   0 upon successful completion.
+ *   -EINVAL if an invalid argument was supplied.
+ *   -ENOTSUPP if the requested operation is not supported.
+ *   -ENOMEM if the driver failed to allocate memory or if
+ *     insufficient resources were available to complete the operation.
+ *   -EIO if an unknown error occurred.
+ */
+int nvidia_p2p_get_pages_persistent(uint64_t virtual_address,
+        uint64_t length,
+        struct nvidia_p2p_page_table **page_table,
+        uint32_t flags);

 #define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00020003

@@ -268,6 +298,8 @@ int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer,
 *   Release a set of pages previously made accessible to
 *   a third-party device.
 *
+*   This API may sleep.
+*
 * @param[in] p2p_token
 *   A token that uniquely identifies the P2P mapping.
 * @param[in] va_space
@@ -282,10 +314,33 @@ int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer,
 *   -EINVAL if an invalid argument was supplied.
 *   -EIO if an unknown error occurred.
 */
-int nvidia_p2p_put_pages(uint64_t p2p_token, uint32_t va_space,
-        uint64_t virtual_address,
+int nvidia_p2p_put_pages(uint64_t p2p_token,
+        uint32_t va_space, uint64_t virtual_address,
         struct nvidia_p2p_page_table *page_table);

+/*
+ * @brief
+ *   Release a set of persistent pages previously made accessible to
+ *   a third-party device.
+ *
+ *   This API may sleep.
+ *
+ * @param[in] virtual_address
+ *   The start address in the specified virtual address space.
+ * @param[in] page_table
+ *   A pointer to the array of structures with P2P PTEs.
+ * @param[in] flags
+ *   Must be set to zero for now.
+ *
+ * @return
+ *   0 upon successful completion.
+ *   -EINVAL if an invalid argument was supplied.
+ *   -EIO if an unknown error occurred.
+ */
+int nvidia_p2p_put_pages_persistent(uint64_t virtual_address,
+        struct nvidia_p2p_page_table *page_table,
+        uint32_t flags);

 /*
  * @brief
  *   Free a third-party P2P page table. (This function is a no-op.)
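Taken together, the persistent API is meant to be used in get/put pairs, gated on the new capability macro; the nvidia-peermem changes later in this compare do exactly this. A minimal consumer sketch of the documented contract (my own illustration, not code from this diff; the include path is assumed):

    #include <linux/errno.h>
    #include "nv-p2p.h"

    /* Pin a 64KB-aligned, GPU-resident range for a third-party device and
     * release it again. Persistent mappings take no free_callback: the
     * pages stay pinned until the explicit put. */
    static int example_pin_range(uint64_t gpu_va, uint64_t len)
    {
        struct nvidia_p2p_page_table *pt = NULL;
        int ret;

    #ifdef NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
        ret = nvidia_p2p_get_pages_persistent(gpu_va, len, &pt, 0);
        if (ret < 0)
            return ret;

        /* ... program the addresses from pt into the peer device ... */

        ret = nvidia_p2p_put_pages_persistent(gpu_va, pt, 0);
    #else
        ret = -ENOTSUPP;
    #endif
        return ret;
    }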
@@ -1,20 +1,25 @@
 /* SPDX-License-Identifier: Linux-OpenIB */
 /*
  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
  * Redistribution and use in source and binary forms, with or
  * without modification, are permitted provided that the following
  * conditions are met:
  *
  * - Redistributions of source code must retain the above
  *   copyright notice, this list of conditions and the following
  *   disclaimer.
  *
  * - Redistributions in binary form must reproduce the above
  *   copyright notice, this list of conditions and the following
  *   disclaimer in the documentation and/or other materials
  *   provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -43,7 +48,9 @@

 MODULE_AUTHOR("Yishai Hadas");
 MODULE_DESCRIPTION("NVIDIA GPU memory plug-in");
-MODULE_LICENSE("Linux-OpenIB");
+MODULE_LICENSE("Dual BSD/GPL");
+
 MODULE_VERSION(DRV_VERSION);
 enum {
     NV_MEM_PEERDIRECT_SUPPORT_DEFAULT = 0,
@@ -53,7 +60,13 @@ static int peerdirect_support = NV_MEM_PEERDIRECT_SUPPORT_DEFAULT;
 module_param(peerdirect_support, int, S_IRUGO);
 MODULE_PARM_DESC(peerdirect_support, "Set level of support for Peer-direct, 0 [default] or 1 [legacy, for example MLNX_OFED 4.9 LTS]");

-#define peer_err(FMT, ARGS...) printk(KERN_ERR "nvidia-peermem" " %s:%d " FMT, __FUNCTION__, __LINE__, ## ARGS)
+#define peer_err(FMT, ARGS...) printk(KERN_ERR "nvidia-peermem" " %s:%d ERROR " FMT, __FUNCTION__, __LINE__, ## ARGS)
+#ifdef NV_MEM_DEBUG
+#define peer_trace(FMT, ARGS...) printk(KERN_DEBUG "nvidia-peermem" " %s:%d TRACE " FMT, __FUNCTION__, __LINE__, ## ARGS)
+#else
+#define peer_trace(FMT, ARGS...) do {} while (0)
+#endif

 #if defined(NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT)

@@ -74,7 +87,10 @@ invalidate_peer_memory mem_invalidate_callback;
 static void *reg_handle    = NULL;
 static void *reg_handle_nc = NULL;

+#define NV_MEM_CONTEXT_MAGIC ((u64)0xF1F4F1D0FEF0DAD0ULL)
+
 struct nv_mem_context {
+    u64 pad1;
     struct nvidia_p2p_page_table *page_table;
     struct nvidia_p2p_dma_mapping *dma_mapping;
     u64 core_context;
@@ -86,8 +102,22 @@ struct nv_mem_context {
     struct task_struct *callback_task;
     int sg_allocated;
     struct sg_table sg_head;
+    u64 pad2;
 };

+#define NV_MEM_CONTEXT_CHECK_OK(MC) ({                                        \
+    struct nv_mem_context *mc = (MC);                                         \
+    int rc = ((0 != mc) &&                                                    \
+              (READ_ONCE(mc->pad1) == NV_MEM_CONTEXT_MAGIC) &&                \
+              (READ_ONCE(mc->pad2) == NV_MEM_CONTEXT_MAGIC));                 \
+    if (!rc) {                                                                \
+        peer_trace("invalid nv_mem_context=%px pad1=%016llx pad2=%016llx\n",  \
+                   mc,                                                        \
+                   mc?mc->pad1:0,                                             \
+                   mc?mc->pad2:0);                                            \
+    }                                                                         \
+    rc;                                                                       \
+})
+
 static void nv_get_p2p_free_callback(void *data)
 {
@@ -97,8 +127,9 @@ static void nv_get_p2p_free_callback(void *data)
     struct nvidia_p2p_dma_mapping *dma_mapping = NULL;

     __module_get(THIS_MODULE);
-    if (!nv_mem_context) {
-        peer_err("nv_get_p2p_free_callback -- invalid nv_mem_context\n");
+
+    if (!NV_MEM_CONTEXT_CHECK_OK(nv_mem_context)) {
+        peer_err("detected invalid context, skipping further processing\n");
         goto out;
     }

@@ -169,9 +200,11 @@ static int nv_mem_acquire(unsigned long addr, size_t size, void *peer_mem_privat
         /* Error case handled as not mine */
         return 0;

+    nv_mem_context->pad1 = NV_MEM_CONTEXT_MAGIC;
     nv_mem_context->page_virt_start = addr & GPU_PAGE_MASK;
     nv_mem_context->page_virt_end   = (addr + size + GPU_PAGE_SIZE - 1) & GPU_PAGE_MASK;
     nv_mem_context->mapped_size = nv_mem_context->page_virt_end - nv_mem_context->page_virt_start;
+    nv_mem_context->pad2 = NV_MEM_CONTEXT_MAGIC;

     ret = nvidia_p2p_get_pages(0, 0, nv_mem_context->page_virt_start, nv_mem_context->mapped_size,
                                &nv_mem_context->page_table, nv_mem_dummy_callback, nv_mem_context);
@@ -195,6 +228,7 @@ static int nv_mem_acquire(unsigned long addr, size_t size, void *peer_mem_privat
     return 1;

 err:
+    memset(nv_mem_context, 0, sizeof(*nv_mem_context));
     kfree(nv_mem_context);

     /* Error case handled as not mine */
@@ -249,8 +283,8 @@ static int nv_dma_map(struct sg_table *sg_head, void *context,
     nv_mem_context->sg_allocated = 1;
     for_each_sg(sg_head->sgl, sg, nv_mem_context->npages, i) {
         sg_set_page(sg, NULL, nv_mem_context->page_size, 0);
-        sg->dma_address = dma_mapping->dma_addresses[i];
-        sg->dma_length  = nv_mem_context->page_size;
+        sg_dma_address(sg) = dma_mapping->dma_addresses[i];
+        sg_dma_len(sg)     = nv_mem_context->page_size;
     }
     nv_mem_context->sg_head = *sg_head;
     *nmap = nv_mem_context->npages;
@@ -284,8 +318,9 @@ out:
     return 0;
 }

-static void nv_mem_put_pages(struct sg_table *sg_head, void *context)
+static void nv_mem_put_pages_common(int nc,
+                                    struct sg_table *sg_head,
+                                    void *context)
 {
     int ret = 0;
     struct nv_mem_context *nv_mem_context =
@@ -302,8 +337,18 @@ static void nv_mem_put_pages(struct sg_table *sg_head, void *context)
     if (nv_mem_context->callback_task == current)
         return;

-    ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start,
-                               nv_mem_context->page_table);
+    if (nc) {
+#ifdef NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
+        ret = nvidia_p2p_put_pages_persistent(nv_mem_context->page_virt_start,
+                                              nv_mem_context->page_table, 0);
+#else
+        ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start,
+                                   nv_mem_context->page_table);
+#endif
+    } else {
+        ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start,
+                                   nv_mem_context->page_table);
+    }

 #ifdef _DEBUG_ONLY_
     /* Here we expect an error in real life cases that should be ignored - not printed.
@@ -318,6 +363,16 @@ static void nv_mem_put_pages(struct sg_table *sg_head, void *context)
     return;
 }

+static void nv_mem_put_pages(struct sg_table *sg_head, void *context)
+{
+    nv_mem_put_pages_common(0, sg_head, context);
+}
+
+static void nv_mem_put_pages_nc(struct sg_table *sg_head, void *context)
+{
+    nv_mem_put_pages_common(1, sg_head, context);
+}
+
 static void nv_mem_release(void *context)
 {
     struct nv_mem_context *nv_mem_context =
@@ -326,6 +381,7 @@ static void nv_mem_release(void *context)
         sg_free_table(&nv_mem_context->sg_head);
         nv_mem_context->sg_allocated = 0;
     }
+    memset(nv_mem_context, 0, sizeof(*nv_mem_context));
     kfree(nv_mem_context);
     module_put(THIS_MODULE);
     return;
@@ -396,8 +452,15 @@ static int nv_mem_get_pages_nc(unsigned long addr,
     nv_mem_context->core_context = core_context;
     nv_mem_context->page_size = GPU_PAGE_SIZE;

+#ifdef NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
+    ret = nvidia_p2p_get_pages_persistent(nv_mem_context->page_virt_start,
+                                          nv_mem_context->mapped_size,
+                                          &nv_mem_context->page_table, 0);
+#else
     ret = nvidia_p2p_get_pages(0, 0, nv_mem_context->page_virt_start, nv_mem_context->mapped_size,
                                &nv_mem_context->page_table, NULL, NULL);
+#endif

     if (ret < 0) {
         peer_err("error %d while calling nvidia_p2p_get_pages() with NULL callback\n", ret);
         return ret;
@@ -407,13 +470,13 @@ static int nv_mem_get_pages_nc(unsigned long addr,
 }

 static struct peer_memory_client nv_mem_client_nc = {
     .acquire       = nv_mem_acquire,
     .get_pages     = nv_mem_get_pages_nc,
     .dma_map       = nv_dma_map,
     .dma_unmap     = nv_dma_unmap,
-    .put_pages     = nv_mem_put_pages,
+    .put_pages     = nv_mem_put_pages_nc,
     .get_page_size = nv_mem_get_page_size,
     .release       = nv_mem_release,
 };

 #endif /* NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT */
@@ -442,8 +505,6 @@ static int __init nv_mem_client_init(void)
 }

 #if defined (NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT)
-    int status = 0;
-
     // off by one, to leave space for the trailing '1' which is flagging
     // the new client type
     BUG_ON(strlen(DRV_NAME) > IB_PEER_MEMORY_NAME_MAX-1);
@@ -472,25 +533,22 @@ static int __init nv_mem_client_init(void)
                                            &mem_invalidate_callback);
     if (!reg_handle) {
         peer_err("nv_mem_client_init -- error while registering traditional client\n");
-        status = -EINVAL;
+        rc = -EINVAL;
         goto out;
     }

-    // The nc client enables support for persistent pages.
-    // Thanks to this check, nvidia-peermem requires the new symbol from nvidia.ko, which
-    // prevents users to unintentionally load this module with unsupported nvidia.ko.
-    BUG_ON(!nvidia_p2p_cap_persistent_pages);
     strcpy(nv_mem_client_nc.name, DRV_NAME "_nc");
     strcpy(nv_mem_client_nc.version, DRV_VERSION);
     reg_handle_nc = ib_register_peer_memory_client(&nv_mem_client_nc, NULL);
     if (!reg_handle_nc) {
         peer_err("nv_mem_client_init -- error while registering nc client\n");
-        status = -EINVAL;
+        rc = -EINVAL;
         goto out;
     }

 out:
-    if (status) {
+    if (rc) {
         if (reg_handle) {
             ib_unregister_peer_memory_client(reg_handle);
             reg_handle = NULL;
@@ -502,7 +560,7 @@ out:
         }
     }

-    return status;
+    return rc;
 #else
     return -EINVAL;
 #endif
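One small but meaningful change in nv_dma_map() above: writing through sg_dma_address()/sg_dma_len() instead of poking sg->dma_address and sg->dma_length directly. sg_dma_len() expands to sg->dma_length only when CONFIG_NEED_SG_DMA_LENGTH is set, and to sg->length otherwise, so the accessors stay correct on every kernel configuration. A standalone sketch of the pattern (illustrative, not driver code):

    #include <linux/scatterlist.h>

    /* Fill one scatterlist entry with an externally produced DMA address.
     * Using the accessor macros keeps this portable across configs where
     * the DMA length field aliases sg->length. */
    static void example_fill_sg(struct scatterlist *sg,
                                dma_addr_t addr, unsigned int len)
    {
        sg_set_page(sg, NULL, len, 0);
        sg_dma_address(sg) = addr;
        sg_dma_len(sg) = len;
    }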
@@ -1,29 +1,33 @@
/*******************************************************************************
    Copyright (c) 2013 NVIDIA Corporation
/*
 * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/

#ifndef __cla06fsubch_h__
#define __cla06fsubch_h__
#ifndef _cla06fsubch_h_
#define _cla06fsubch_h_

#define NVA06F_SUBCHANNEL_2D          3
#define NVA06F_SUBCHANNEL_3D          0
#define NVA06F_SUBCHANNEL_COMPUTE     1
#define NVA06F_SUBCHANNEL_COPY_ENGINE 4
#define NVA06F_SUBCHANNEL_I2M         2

#endif // {__cla06fsubch_h__}
#endif // _cla06fsubch_h_
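These NVA06F_SUBCHANNEL_* values are the fixed engine-to-subchannel assignments for Kepler-and-later GPFIFO channels: software binds an engine class to a subchannel once with SET_OBJECT, after which every method sent on that subchannel is routed to the bound engine. A sketch, where push_method(pb, subch, method, data) is a hypothetical pushbuffer helper and ce_class is the copy-engine class handle (neither is defined in this header):

/* Bind the copy-engine class once; later CE methods go out on subchannel 4. */
push_method(pb, NVA06F_SUBCHANNEL_COPY_ENGINE,
            0x0000 /* SET_OBJECT, per the GPFIFO class headers below */,
            ce_class);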
@@ -1,25 +1,25 @@
/*******************************************************************************
    Copyright (c) 2021-2022 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/
/*
 * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _cla16f_h_
#define _cla16f_h_
@@ -30,9 +30,48 @@ extern "C" {

#include "nvtypes.h"

#define KEPLER_CHANNEL_GPFIFO_B    (0x0000A16F)
/* class KEPLER_CHANNEL_GPFIFO */
/*
 * Documentation for KEPLER_CHANNEL_GPFIFO can be found in dev_pbdma.ref,
 * chapter "User Control Registers". It is documented as device NV_UDMA.
 * The GPFIFO format itself is also documented in dev_pbdma.ref,
 * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref,
 * chapter "FIFO DMA RAM", NV_FIFO_DMA_*.
 *
 */
#define KEPLER_CHANNEL_GPFIFO_B    (0x0000A16F)

/* pio method data structure */
typedef volatile struct _cla16f_tag0 {
 NvV32 Reserved00[0x7c0];
} NvA16FTypedef, KEPLER_ChannelGPFifoB;
#define NVA16F_TYPEDEF    KEPLER_CHANNELChannelGPFifo
/* dma flow control data structure */
typedef volatile struct _cla16f_tag1 {
 NvU32 Ignored00[0x010];        /*                                  0000-003f*/
 NvU32 Put;                     /* put offset, read/write           0040-0043*/
 NvU32 Get;                     /* get offset, read only            0044-0047*/
 NvU32 Reference;               /* reference value, read only       0048-004b*/
 NvU32 PutHi;                   /* high order put offset bits       004c-004f*/
 NvU32 Ignored01[0x002];        /*                                  0050-0057*/
 NvU32 TopLevelGet;             /* top level get offset, read only  0058-005b*/
 NvU32 TopLevelGetHi;           /* high order top level get bits    005c-005f*/
 NvU32 GetHi;                   /* high order get offset bits       0060-0063*/
 NvU32 Ignored02[0x007];        /*                                  0064-007f*/
 NvU32 Ignored03;               /* used to be engine yield          0080-0083*/
 NvU32 Ignored04[0x001];        /*                                  0084-0087*/
 NvU32 GPGet;                   /* GP FIFO get offset, read only    0088-008b*/
 NvU32 GPPut;                   /* GP FIFO put offset               008c-008f*/
 NvU32 Ignored05[0x5c];
} NvA16FControl, KeplerBControlGPFifo;
/* fields and values */
#define NVA16F_NUMBER_OF_SUBCHANNELS               (8)
#define NVA16F_SET_OBJECT                          (0x00000000)
#define NVA16F_SET_OBJECT_NVCLASS                  15:0
#define NVA16F_SET_OBJECT_ENGINE                   20:16
#define NVA16F_SET_OBJECT_ENGINE_SW                0x0000001f
#define NVA16F_ILLEGAL                             (0x00000004)
#define NVA16F_ILLEGAL_HANDLE                      31:0
#define NVA16F_NOP                                 (0x00000008)
#define NVA16F_NOP_HANDLE                          31:0
#define NVA16F_SEMAPHOREA                          (0x00000010)
@@ -100,6 +139,12 @@ extern "C" {
#define NVA16F_SET_REFERENCE_COUNT                 31:0
#define NVA16F_WFI                                 (0x00000078)
#define NVA16F_WFI_HANDLE                          31:0
#define NVA16F_CRC_CHECK                           (0x0000007c)
#define NVA16F_CRC_CHECK_VALUE                     31:0
#define NVA16F_YIELD                               (0x00000080)
#define NVA16F_YIELD_OP                            1:0
#define NVA16F_YIELD_OP_NOP                        0x00000000


/* GPFIFO entry format */
#define NVA16F_GP_ENTRY__SIZE                      8
@@ -126,13 +171,28 @@ extern "C" {
#define NVA16F_GP_ENTRY1_OPCODE_PB_CRC             0x00000003

/* dma method formats */
#define NVA16F_DMA_METHOD_ADDRESS_OLD              12:2
#define NVA16F_DMA_METHOD_ADDRESS                  11:0
#define NVA16F_DMA_SUBDEVICE_MASK                  15:4
#define NVA16F_DMA_METHOD_SUBCHANNEL               15:13
#define NVA16F_DMA_TERT_OP                         17:16
#define NVA16F_DMA_TERT_OP_GRP0_INC_METHOD         (0x00000000)
#define NVA16F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK   (0x00000001)
#define NVA16F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002)
#define NVA16F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK   (0x00000003)
#define NVA16F_DMA_TERT_OP_GRP2_NON_INC_METHOD     (0x00000000)
#define NVA16F_DMA_METHOD_COUNT_OLD                28:18
#define NVA16F_DMA_METHOD_COUNT                    28:16
#define NVA16F_DMA_IMMD_DATA                       28:16
#define NVA16F_DMA_SEC_OP                          31:29
#define NVA16F_DMA_SEC_OP_GRP0_USE_TERT            (0x00000000)
#define NVA16F_DMA_SEC_OP_INC_METHOD               (0x00000001)
#define NVA16F_DMA_SEC_OP_GRP2_USE_TERT            (0x00000002)
#define NVA16F_DMA_SEC_OP_NON_INC_METHOD           (0x00000003)

#define NVA16F_DMA_SEC_OP_IMMD_DATA_METHOD         (0x00000004)
#define NVA16F_DMA_SEC_OP_ONE_INC                  (0x00000005)
#define NVA16F_DMA_SEC_OP_RESERVED6                (0x00000006)
#define NVA16F_DMA_SEC_OP_END_PB_SEGMENT           (0x00000007)
/* dma incrementing method format */
#define NVA16F_DMA_INCR_ADDRESS                    11:0
#define NVA16F_DMA_INCR_SUBCHANNEL                 15:13
@@ -140,7 +200,6 @@ extern "C" {
#define NVA16F_DMA_INCR_OPCODE                     31:29
#define NVA16F_DMA_INCR_OPCODE_VALUE               (0x00000001)
#define NVA16F_DMA_INCR_DATA                       31:0

/* dma non-incrementing method format */
#define NVA16F_DMA_NONINCR_ADDRESS                 11:0
#define NVA16F_DMA_NONINCR_SUBCHANNEL              15:13
@@ -148,13 +207,45 @@ extern "C" {
#define NVA16F_DMA_NONINCR_OPCODE                  31:29
#define NVA16F_DMA_NONINCR_OPCODE_VALUE            (0x00000003)
#define NVA16F_DMA_NONINCR_DATA                    31:0

/* dma increment-once method format */
#define NVA16F_DMA_ONEINCR_ADDRESS                 11:0
#define NVA16F_DMA_ONEINCR_SUBCHANNEL              15:13
#define NVA16F_DMA_ONEINCR_COUNT                   28:16
#define NVA16F_DMA_ONEINCR_OPCODE                  31:29
#define NVA16F_DMA_ONEINCR_OPCODE_VALUE            (0x00000005)
#define NVA16F_DMA_ONEINCR_DATA                    31:0
/* dma no-operation format */
#define NVA16F_DMA_NOP                             (0x00000000)
/* dma immediate-data format */
#define NVA16F_DMA_IMMD_ADDRESS                    11:0
#define NVA16F_DMA_IMMD_SUBCHANNEL                 15:13
#define NVA16F_DMA_IMMD_DATA                       28:16
#define NVA16F_DMA_IMMD_OPCODE                     31:29
#define NVA16F_DMA_IMMD_OPCODE_VALUE               (0x00000004)
/* dma set sub-device mask format */
#define NVA16F_DMA_SET_SUBDEVICE_MASK_VALUE        15:4
#define NVA16F_DMA_SET_SUBDEVICE_MASK_OPCODE       31:16
#define NVA16F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001)
/* dma store sub-device mask format */
#define NVA16F_DMA_STORE_SUBDEVICE_MASK_VALUE      15:4
#define NVA16F_DMA_STORE_SUBDEVICE_MASK_OPCODE     31:16
#define NVA16F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002)
/* dma use sub-device mask format */
#define NVA16F_DMA_USE_SUBDEVICE_MASK_OPCODE       31:16
#define NVA16F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003)
/* dma end-segment format */
#define NVA16F_DMA_ENDSEG_OPCODE                   31:29
#define NVA16F_DMA_ENDSEG_OPCODE_VALUE             (0x00000007)
/* dma legacy incrementing/non-incrementing formats */
#define NVA16F_DMA_ADDRESS                         12:2
#define NVA16F_DMA_SUBCH                           15:13
#define NVA16F_DMA_OPCODE3                         17:16
#define NVA16F_DMA_OPCODE3_NONE                    (0x00000000)
#define NVA16F_DMA_COUNT                           28:18
#define NVA16F_DMA_OPCODE                          31:29
#define NVA16F_DMA_OPCODE_METHOD                   (0x00000000)
#define NVA16F_DMA_OPCODE_NONINC_METHOD            (0x00000002)
#define NVA16F_DMA_DATA                            31:0

#ifdef __cplusplus
};     /* extern "C" */
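The hi:lo pairs above are bit positions consumed by NVIDIA's DRF-style shift/mask macros. As an illustration only (DRF_NUM below is a local stand-in, not the header's own macro; NvU32/NvU64 come from nvtypes.h), here is how a GPFIFO entry pointing at a pushbuffer segment, and the header word of an incrementing method, would be packed:

#define DRF_NUM(hi, lo, v) ((((NvU32)(v)) & ((1u << ((hi) - (lo) + 1)) - 1)) << (lo))

static void nva16f_gp_entry(NvU32 entry[2], NvU64 pb_va, NvU32 len_words)
{
    /* NVA16F_GP_ENTRY0_GET holds pushbuffer address bits 31:2 */
    entry[0] = DRF_NUM(31, 2, (NvU32)(pb_va >> 2));
    /* NVA16F_GP_ENTRY1_GET_HI holds address bits 39:32; LENGTH is in words */
    entry[1] = DRF_NUM(7, 0, (NvU32)(pb_va >> 32))
             | DRF_NUM(30, 10, len_words);
}

static NvU32 nva16f_incr_method_header(NvU32 subch, NvU32 method, NvU32 count)
{
    return DRF_NUM(31, 29, 0x1)         /* NVA16F_DMA_SEC_OP_INC_METHOD */
         | DRF_NUM(28, 16, count)       /* NVA16F_DMA_METHOD_COUNT      */
         | DRF_NUM(15, 13, subch)       /* NVA16F_DMA_METHOD_SUBCHANNEL */
         | DRF_NUM(11, 0, method >> 2); /* NVA16F_DMA_METHOD_ADDRESS, in dword units */
}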
@@ -1,24 +1,26 @@
/*******************************************************************************
    Copyright (c) 2014 NVidia Corporation
/*
 * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#ifndef _clb069_h_
#define _clb069_h_
@@ -1,28 +1,28 @@
/*******************************************************************************
    Copyright (c) 2014 NVIDIA Corporation
/*
 * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/

#ifndef _clB06f_h_
#define _clB06f_h_
#ifndef _clb06f_h_
#define _clb06f_h_

#ifdef __cplusplus
extern "C" {
@@ -30,10 +30,46 @@ extern "C" {

#include "nvtypes.h"

/* class MAXWELL_CHANNEL_GPFIFO  */
/*
 * Documentation for MAXWELL_CHANNEL_GPFIFO can be found in dev_pbdma.ref,
 * chapter "User Control Registers". It is documented as device NV_UDMA.
 * The GPFIFO format itself is also documented in dev_pbdma.ref,
 * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref,
 * chapter "FIFO DMA RAM", NV_FIFO_DMA_*.
 *
 */
#define MAXWELL_CHANNEL_GPFIFO_A    (0x0000B06F)

/* class MAXWELL_CHANNEL_GPFIFO  */
#define NVB06F_TYPEDEF    MAXWELL_CHANNELChannelGPFifoA

/* dma flow control data structure */
typedef volatile struct _clb06f_tag0 {
 NvU32 Ignored00[0x010];        /*                                  0000-003f*/
 NvU32 Put;                     /* put offset, read/write           0040-0043*/
 NvU32 Get;                     /* get offset, read only            0044-0047*/
 NvU32 Reference;               /* reference value, read only       0048-004b*/
 NvU32 PutHi;                   /* high order put offset bits       004c-004f*/
 NvU32 Ignored01[0x002];        /*                                  0050-0057*/
 NvU32 TopLevelGet;             /* top level get offset, read only  0058-005b*/
 NvU32 TopLevelGetHi;           /* high order top level get bits    005c-005f*/
 NvU32 GetHi;                   /* high order get offset bits       0060-0063*/
 NvU32 Ignored02[0x007];        /*                                  0064-007f*/
 NvU32 Ignored03;               /* used to be engine yield          0080-0083*/
 NvU32 Ignored04[0x001];        /*                                  0084-0087*/
 NvU32 GPGet;                   /* GP FIFO get offset, read only    0088-008b*/
 NvU32 GPPut;                   /* GP FIFO put offset               008c-008f*/
 NvU32 Ignored05[0x5c];
} Nvb06FControl, MaxwellAControlGPFifo;

/* fields and values */
#define NVB06F_NUMBER_OF_SUBCHANNELS               (8)
#define NVB06F_SET_OBJECT                          (0x00000000)
#define NVB06F_SET_OBJECT_NVCLASS                  15:0
#define NVB06F_SET_OBJECT_ENGINE                   20:16
#define NVB06F_SET_OBJECT_ENGINE_SW                0x0000001f
#define NVB06F_ILLEGAL                             (0x00000004)
#define NVB06F_ILLEGAL_HANDLE                      31:0
#define NVB06F_NOP                                 (0x00000008)
#define NVB06F_NOP_HANDLE                          31:0
#define NVB06F_SEMAPHOREA                          (0x00000010)
@@ -47,6 +83,8 @@ extern "C" {
#define NVB06F_SEMAPHORED_OPERATION_ACQUIRE        0x00000001
#define NVB06F_SEMAPHORED_OPERATION_RELEASE        0x00000002
#define NVB06F_SEMAPHORED_OPERATION_ACQ_GEQ        0x00000004
#define NVB06F_SEMAPHORED_OPERATION_ACQ_AND        0x00000008
#define NVB06F_SEMAPHORED_OPERATION_REDUCTION      0x00000010
#define NVB06F_SEMAPHORED_ACQUIRE_SWITCH           12:12
#define NVB06F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED  0x00000000
#define NVB06F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED   0x00000001
@@ -56,8 +94,22 @@ extern "C" {
#define NVB06F_SEMAPHORED_RELEASE_SIZE             24:24
#define NVB06F_SEMAPHORED_RELEASE_SIZE_16BYTE      0x00000000
#define NVB06F_SEMAPHORED_RELEASE_SIZE_4BYTE       0x00000001

#define NVB06F_SEMAPHORED_REDUCTION                30:27
#define NVB06F_SEMAPHORED_REDUCTION_MIN            0x00000000
#define NVB06F_SEMAPHORED_REDUCTION_MAX            0x00000001
#define NVB06F_SEMAPHORED_REDUCTION_XOR            0x00000002
#define NVB06F_SEMAPHORED_REDUCTION_AND            0x00000003
#define NVB06F_SEMAPHORED_REDUCTION_OR             0x00000004
#define NVB06F_SEMAPHORED_REDUCTION_ADD            0x00000005
#define NVB06F_SEMAPHORED_REDUCTION_INC            0x00000006
#define NVB06F_SEMAPHORED_REDUCTION_DEC            0x00000007
#define NVB06F_SEMAPHORED_FORMAT                   31:31
#define NVB06F_SEMAPHORED_FORMAT_SIGNED            0x00000000
#define NVB06F_SEMAPHORED_FORMAT_UNSIGNED          0x00000001
#define NVB06F_NON_STALL_INTERRUPT                 (0x00000020)
#define NVB06F_NON_STALL_INTERRUPT_HANDLE          31:0
#define NVB06F_FB_FLUSH                            (0x00000024)
#define NVB06F_FB_FLUSH_HANDLE                     31:0
// NOTE - MEM_OP_A and MEM_OP_B have been removed for gm20x to make room for
// possible future MEM_OP features. MEM_OP_C/D have identical functionality
// to the previous MEM_OP_A/B methods.
@@ -84,10 +136,27 @@ extern "C" {
#define NVB06F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f
#define NVB06F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY   0x00000010
#define NVB06F_MEM_OP_D_TLB_INVALIDATE_ADDR_HI     7:0
#define NVB06F_SET_REFERENCE                       (0x00000050)
#define NVB06F_SET_REFERENCE_COUNT                 31:0
#define NVB06F_WFI                                 (0x00000078)
#define NVB06F_WFI_SCOPE                           0:0
#define NVB06F_WFI_SCOPE_CURRENT_SCG_TYPE          0x00000000
#define NVB06F_WFI_SCOPE_ALL                       0x00000001
#define NVB06F_CRC_CHECK                           (0x0000007c)
#define NVB06F_CRC_CHECK_VALUE                     31:0
#define NVB06F_YIELD                               (0x00000080)
#define NVB06F_YIELD_OP                            1:0
#define NVB06F_YIELD_OP_NOP                        0x00000000
#define NVB06F_YIELD_OP_PBDMA_TIMESLICE            0x00000001
#define NVB06F_YIELD_OP_RUNLIST_TIMESLICE          0x00000002
#define NVB06F_YIELD_OP_TSG                        0x00000003


/* GPFIFO entry format */
#define NVB06F_GP_ENTRY__SIZE                      8
#define NVB06F_GP_ENTRY0_FETCH                     0:0
#define NVB06F_GP_ENTRY0_FETCH_UNCONDITIONAL       0x00000000
#define NVB06F_GP_ENTRY0_FETCH_CONDITIONAL         0x00000001
#define NVB06F_GP_ENTRY0_GET                       31:2
#define NVB06F_GP_ENTRY0_OPERAND                   31:0
#define NVB06F_GP_ENTRY1_GET_HI                    7:0
@@ -98,11 +167,38 @@ extern "C" {
#define NVB06F_GP_ENTRY1_LEVEL_MAIN                0x00000000
#define NVB06F_GP_ENTRY1_LEVEL_SUBROUTINE          0x00000001
#define NVB06F_GP_ENTRY1_LENGTH                    30:10
#define NVB06F_GP_ENTRY1_SYNC                      31:31
#define NVB06F_GP_ENTRY1_SYNC_PROCEED              0x00000000
#define NVB06F_GP_ENTRY1_SYNC_WAIT                 0x00000001
#define NVB06F_GP_ENTRY1_OPCODE                    7:0
#define NVB06F_GP_ENTRY1_OPCODE_NOP                0x00000000
#define NVB06F_GP_ENTRY1_OPCODE_ILLEGAL            0x00000001
#define NVB06F_GP_ENTRY1_OPCODE_GP_CRC             0x00000002
#define NVB06F_GP_ENTRY1_OPCODE_PB_CRC             0x00000003

/* dma method formats */
#define NVB06F_DMA_METHOD_ADDRESS_OLD              12:2
#define NVB06F_DMA_METHOD_ADDRESS                  11:0
#define NVB06F_DMA_SUBDEVICE_MASK                  15:4
#define NVB06F_DMA_METHOD_SUBCHANNEL               15:13
#define NVB06F_DMA_TERT_OP                         17:16
#define NVB06F_DMA_TERT_OP_GRP0_INC_METHOD         (0x00000000)
#define NVB06F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK   (0x00000001)
#define NVB06F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002)
#define NVB06F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK   (0x00000003)
#define NVB06F_DMA_TERT_OP_GRP2_NON_INC_METHOD     (0x00000000)
#define NVB06F_DMA_METHOD_COUNT_OLD                28:18
#define NVB06F_DMA_METHOD_COUNT                    28:16
#define NVB06F_DMA_IMMD_DATA                       28:16
#define NVB06F_DMA_SEC_OP                          31:29
#define NVB06F_DMA_SEC_OP_GRP0_USE_TERT            (0x00000000)
#define NVB06F_DMA_SEC_OP_INC_METHOD               (0x00000001)
#define NVB06F_DMA_SEC_OP_GRP2_USE_TERT            (0x00000002)
#define NVB06F_DMA_SEC_OP_NON_INC_METHOD           (0x00000003)
#define NVB06F_DMA_SEC_OP_IMMD_DATA_METHOD         (0x00000004)
#define NVB06F_DMA_SEC_OP_ONE_INC                  (0x00000005)
#define NVB06F_DMA_SEC_OP_RESERVED6                (0x00000006)
#define NVB06F_DMA_SEC_OP_END_PB_SEGMENT           (0x00000007)
/* dma incrementing method format */
#define NVB06F_DMA_INCR_ADDRESS                    11:0
#define NVB06F_DMA_INCR_SUBCHANNEL                 15:13
@@ -132,9 +228,33 @@ extern "C" {
#define NVB06F_DMA_IMMD_DATA                       28:16
#define NVB06F_DMA_IMMD_OPCODE                     31:29
#define NVB06F_DMA_IMMD_OPCODE_VALUE               (0x00000004)
/* dma set sub-device mask format */
#define NVB06F_DMA_SET_SUBDEVICE_MASK_VALUE        15:4
#define NVB06F_DMA_SET_SUBDEVICE_MASK_OPCODE       31:16
#define NVB06F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001)
/* dma store sub-device mask format */
#define NVB06F_DMA_STORE_SUBDEVICE_MASK_VALUE      15:4
#define NVB06F_DMA_STORE_SUBDEVICE_MASK_OPCODE     31:16
#define NVB06F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002)
/* dma use sub-device mask format */
#define NVB06F_DMA_USE_SUBDEVICE_MASK_OPCODE       31:16
#define NVB06F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003)
/* dma end-segment format */
#define NVB06F_DMA_ENDSEG_OPCODE                   31:29
#define NVB06F_DMA_ENDSEG_OPCODE_VALUE             (0x00000007)
/* dma legacy incrementing/non-incrementing formats */
#define NVB06F_DMA_ADDRESS                         12:2
#define NVB06F_DMA_SUBCH                           15:13
#define NVB06F_DMA_OPCODE3                         17:16
#define NVB06F_DMA_OPCODE3_NONE                    (0x00000000)
#define NVB06F_DMA_COUNT                           28:18
#define NVB06F_DMA_OPCODE                          31:29
#define NVB06F_DMA_OPCODE_METHOD                   (0x00000000)
#define NVB06F_DMA_OPCODE_NONINC_METHOD            (0x00000002)
#define NVB06F_DMA_DATA                            31:0

#ifdef __cplusplus
};     /* extern "C" */
#endif

#endif /* _clB06F_h_ */
#endif /* _clb06f_h_ */
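One worked example of how the semaphore fields above combine (hedged: the defines in this hunk give only the values, and the assumption that the OPERATION field occupies bits 4:0 follows the layout of related GPFIFO classes). Encoding a 4-byte unsigned ADD reduction release with the illustrative DRF_NUM macro from the earlier sketch:

NvU32 sem_d = DRF_NUM(4, 0, 0x10)   /* NVB06F_SEMAPHORED_OPERATION_REDUCTION */
            | DRF_NUM(24, 24, 0x1)  /* NVB06F_SEMAPHORED_RELEASE_SIZE_4BYTE  */
            | DRF_NUM(30, 27, 0x5)  /* NVB06F_SEMAPHORED_REDUCTION_ADD       */
            | DRF_NUM(31, 31, 0x1); /* NVB06F_SEMAPHORED_FORMAT_UNSIGNED     */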
@@ -1,19 +1,19 @@
/*******************************************************************************
    Copyright (c) 2014 NVIDIA Corporation
    Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:
    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.
    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
@@ -32,6 +32,10 @@ extern "C" {

#define MAXWELL_DMA_COPY_A                                                              (0x0000B0B5)

#define NVB0B5_NOP                                                              (0x00000100)
#define NVB0B5_NOP_PARAMETER                                                    31:0
#define NVB0B5_PM_TRIGGER                                                       (0x00000140)
#define NVB0B5_PM_TRIGGER_V                                                     31:0
#define NVB0B5_SET_SEMAPHORE_A                                                  (0x00000240)
#define NVB0B5_SET_SEMAPHORE_A_UPPER                                            7:0
#define NVB0B5_SET_SEMAPHORE_B                                                  (0x00000244)
@@ -183,9 +187,75 @@ extern "C" {
#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO                      (0x00000001)
#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE                    (0x00000002)
#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR                     (0x00000003)

#define NVB0B5_SET_DST_BLOCK_SIZE                                               (0x0000070C)
#define NVB0B5_SET_DST_BLOCK_SIZE_WIDTH                                         3:0
#define NVB0B5_SET_DST_BLOCK_SIZE_WIDTH_QUARTER_GOB                             (0x0000000E)
#define NVB0B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB                                 (0x00000000)
#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT                                        7:4
#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB                                (0x00000000)
#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS                               (0x00000001)
#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS                              (0x00000002)
#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS                             (0x00000003)
#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS                           (0x00000004)
#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS                         (0x00000005)
#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH                                         11:8
#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB                                 (0x00000000)
#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS                                (0x00000001)
#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS                               (0x00000002)
#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS                              (0x00000003)
#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS                            (0x00000004)
#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS                          (0x00000005)
#define NVB0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT                                    15:12
#define NVB0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_TESLA_4                 (0x00000000)
#define NVB0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8                 (0x00000001)
#define NVB0B5_SET_DST_WIDTH                                                    (0x00000710)
#define NVB0B5_SET_DST_WIDTH_V                                                  31:0
#define NVB0B5_SET_DST_HEIGHT                                                   (0x00000714)
#define NVB0B5_SET_DST_HEIGHT_V                                                 31:0
#define NVB0B5_SET_DST_DEPTH                                                    (0x00000718)
#define NVB0B5_SET_DST_DEPTH_V                                                  31:0
#define NVB0B5_SET_DST_LAYER                                                    (0x0000071C)
#define NVB0B5_SET_DST_LAYER_V                                                  31:0
#define NVB0B5_SET_DST_ORIGIN                                                   (0x00000720)
#define NVB0B5_SET_DST_ORIGIN_X                                                 15:0
#define NVB0B5_SET_DST_ORIGIN_Y                                                 31:16
#define NVB0B5_SET_SRC_BLOCK_SIZE                                               (0x00000728)
#define NVB0B5_SET_SRC_BLOCK_SIZE_WIDTH                                         3:0
#define NVB0B5_SET_SRC_BLOCK_SIZE_WIDTH_QUARTER_GOB                             (0x0000000E)
#define NVB0B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB                                 (0x00000000)
#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT                                        7:4
#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB                                (0x00000000)
#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS                               (0x00000001)
#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS                              (0x00000002)
#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS                             (0x00000003)
#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS                           (0x00000004)
#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS                         (0x00000005)
#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH                                         11:8
#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB                                 (0x00000000)
#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS                                (0x00000001)
#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS                               (0x00000002)
#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS                              (0x00000003)
#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS                            (0x00000004)
#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS                          (0x00000005)
#define NVB0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT                                    15:12
#define NVB0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_TESLA_4                 (0x00000000)
#define NVB0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8                 (0x00000001)
#define NVB0B5_SET_SRC_WIDTH                                                    (0x0000072C)
#define NVB0B5_SET_SRC_WIDTH_V                                                  31:0
#define NVB0B5_SET_SRC_HEIGHT                                                   (0x00000730)
#define NVB0B5_SET_SRC_HEIGHT_V                                                 31:0
#define NVB0B5_SET_SRC_DEPTH                                                    (0x00000734)
#define NVB0B5_SET_SRC_DEPTH_V                                                  31:0
#define NVB0B5_SET_SRC_LAYER                                                    (0x00000738)
#define NVB0B5_SET_SRC_LAYER_V                                                  31:0
#define NVB0B5_SET_SRC_ORIGIN                                                   (0x0000073C)
#define NVB0B5_SET_SRC_ORIGIN_X                                                 15:0
#define NVB0B5_SET_SRC_ORIGIN_Y                                                 31:16
#define NVB0B5_PM_TRIGGER_END                                                   (0x00001114)
#define NVB0B5_PM_TRIGGER_END_V                                                 31:0

#ifdef __cplusplus
};     /* extern "C" */
#endif
#endif // _clb0b5_h
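The BLOCK_SIZE methods above pack per-dimension log2 GOB counts plus the GOB height into a single word. A sketch of describing a block-linear destination two GOBs high with Fermi-style 8-row GOBs, reusing the hypothetical push_method()/DRF_NUM helpers from the earlier sketches:

push_method(pb, ce_subch, 0x070C /* NVB0B5_SET_DST_BLOCK_SIZE */,
            DRF_NUM(3, 0, 0x0)      /* WIDTH_ONE_GOB      */
          | DRF_NUM(7, 4, 0x1)      /* HEIGHT_TWO_GOBS    */
          | DRF_NUM(11, 8, 0x0)     /* DEPTH_ONE_GOB      */
          | DRF_NUM(15, 12, 0x1));  /* GOB_HEIGHT_FERMI_8 */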
@@ -1,25 +1,25 @@
/*******************************************************************************
    Copyright (c) 2014 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/
/*
 * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _clc06f_h_
#define _clc06f_h_
@@ -30,10 +30,47 @@ extern "C" {

#include "nvtypes.h"

/* class PASCAL_CHANNEL_GPFIFO  */
/*
 * Documentation for PASCAL_CHANNEL_GPFIFO can be found in dev_pbdma.ref,
 * chapter "User Control Registers". It is documented as device NV_UDMA.
 * The GPFIFO format itself is also documented in dev_pbdma.ref,
 * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref,
 * chapter "FIFO DMA RAM", NV_FIFO_DMA_*.
 *
 * Note there is no .mfs file for this class.
 */
#define PASCAL_CHANNEL_GPFIFO_A    (0x0000C06F)

/* class PASCAL_CHANNEL_GPFIFO_A */
#define NVC06F_TYPEDEF    PASCAL_CHANNELChannelGPFifoA

/* dma flow control data structure */
typedef volatile struct Nvc06fControl_struct {
 NvU32 Ignored00[0x010];        /*                                  0000-003f*/
 NvU32 Put;                     /* put offset, read/write           0040-0043*/
 NvU32 Get;                     /* get offset, read only            0044-0047*/
 NvU32 Reference;               /* reference value, read only       0048-004b*/
 NvU32 PutHi;                   /* high order put offset bits       004c-004f*/
 NvU32 Ignored01[0x002];        /*                                  0050-0057*/
 NvU32 TopLevelGet;             /* top level get offset, read only  0058-005b*/
 NvU32 TopLevelGetHi;           /* high order top level get bits    005c-005f*/
 NvU32 GetHi;                   /* high order get offset bits       0060-0063*/
 NvU32 Ignored02[0x007];        /*                                  0064-007f*/
 NvU32 Ignored03;               /* used to be engine yield          0080-0083*/
 NvU32 Ignored04[0x001];        /*                                  0084-0087*/
 NvU32 GPGet;                   /* GP FIFO get offset, read only    0088-008b*/
 NvU32 GPPut;                   /* GP FIFO put offset               008c-008f*/
 NvU32 Ignored05[0x5c];
} Nvc06fControl, PascalAControlGPFifo;

/* fields and values */
#define NVC06F_NUMBER_OF_SUBCHANNELS               (8)
#define NVC06F_SET_OBJECT                          (0x00000000)
#define NVC06F_SET_OBJECT_NVCLASS                  15:0
#define NVC06F_SET_OBJECT_ENGINE                   20:16
#define NVC06F_SET_OBJECT_ENGINE_SW                0x0000001f
#define NVC06F_ILLEGAL                             (0x00000004)
#define NVC06F_ILLEGAL_HANDLE                      31:0
#define NVC06F_NOP                                 (0x00000008)
#define NVC06F_NOP_HANDLE                          31:0
#define NVC06F_SEMAPHOREA                          (0x00000010)
@@ -47,54 +84,33 @@ extern "C" {
#define NVC06F_SEMAPHORED_OPERATION_ACQUIRE        0x00000001
#define NVC06F_SEMAPHORED_OPERATION_RELEASE        0x00000002
#define NVC06F_SEMAPHORED_OPERATION_ACQ_GEQ        0x00000004
#define NVC06F_SEMAPHORED_OPERATION_ACQ_AND        0x00000008
#define NVC06F_SEMAPHORED_OPERATION_REDUCTION      0x00000010
#define NVC06F_SEMAPHORED_ACQUIRE_SWITCH           12:12
#define NVC06F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED  0x00000000
#define NVC06F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED   0x00000001


/* GPFIFO entry format */
#define NVC06F_GP_ENTRY__SIZE                      8
#define NVC06F_GP_ENTRY0_GET                       31:2
#define NVC06F_GP_ENTRY0_OPERAND                   31:0
#define NVC06F_GP_ENTRY1_GET_HI                    7:0
#define NVC06F_GP_ENTRY1_PRIV                      8:8
#define NVC06F_GP_ENTRY1_PRIV_USER                 0x00000000
#define NVC06F_GP_ENTRY1_PRIV_KERNEL               0x00000001
#define NVC06F_GP_ENTRY1_LEVEL                     9:9
#define NVC06F_GP_ENTRY1_LEVEL_MAIN                0x00000000
#define NVC06F_GP_ENTRY1_LEVEL_SUBROUTINE          0x00000001
#define NVC06F_GP_ENTRY1_LENGTH                    30:10

/* dma incrementing method format */
#define NVC06F_DMA_INCR_ADDRESS                    11:0
#define NVC06F_DMA_INCR_SUBCHANNEL                 15:13
#define NVC06F_DMA_INCR_COUNT                      28:16
#define NVC06F_DMA_INCR_OPCODE                     31:29
#define NVC06F_DMA_INCR_OPCODE_VALUE               (0x00000001)
#define NVC06F_DMA_INCR_DATA                       31:0
/* dma non-incrementing method format */
#define NVC06F_DMA_NONINCR_ADDRESS                 11:0
#define NVC06F_DMA_NONINCR_SUBCHANNEL              15:13
#define NVC06F_DMA_NONINCR_COUNT                   28:16
#define NVC06F_DMA_NONINCR_OPCODE                  31:29
#define NVC06F_DMA_NONINCR_OPCODE_VALUE            (0x00000003)
#define NVC06F_DMA_NONINCR_DATA                    31:0
/* dma increment-once method format */
#define NVC06F_DMA_ONEINCR_ADDRESS                 11:0
#define NVC06F_DMA_ONEINCR_SUBCHANNEL              15:13
#define NVC06F_DMA_ONEINCR_COUNT                   28:16
#define NVC06F_DMA_ONEINCR_OPCODE                  31:29
#define NVC06F_DMA_ONEINCR_OPCODE_VALUE            (0x00000005)
#define NVC06F_DMA_ONEINCR_DATA                    31:0
/* dma no-operation format */
#define NVC06F_DMA_NOP                             (0x00000000)
/* dma immediate-data format */
#define NVC06F_DMA_IMMD_ADDRESS                    11:0
#define NVC06F_DMA_IMMD_SUBCHANNEL                 15:13
#define NVC06F_DMA_IMMD_DATA                       28:16
#define NVC06F_DMA_IMMD_OPCODE                     31:29
#define NVC06F_DMA_IMMD_OPCODE_VALUE               (0x00000004)

#define NVC06F_SEMAPHORED_RELEASE_WFI              20:20
#define NVC06F_SEMAPHORED_RELEASE_WFI_EN           0x00000000
#define NVC06F_SEMAPHORED_RELEASE_WFI_DIS          0x00000001
#define NVC06F_SEMAPHORED_RELEASE_SIZE             24:24
#define NVC06F_SEMAPHORED_RELEASE_SIZE_16BYTE      0x00000000
#define NVC06F_SEMAPHORED_RELEASE_SIZE_4BYTE       0x00000001
#define NVC06F_SEMAPHORED_REDUCTION                30:27
#define NVC06F_SEMAPHORED_REDUCTION_MIN            0x00000000
#define NVC06F_SEMAPHORED_REDUCTION_MAX            0x00000001
#define NVC06F_SEMAPHORED_REDUCTION_XOR            0x00000002
#define NVC06F_SEMAPHORED_REDUCTION_AND            0x00000003
#define NVC06F_SEMAPHORED_REDUCTION_OR             0x00000004
#define NVC06F_SEMAPHORED_REDUCTION_ADD            0x00000005
#define NVC06F_SEMAPHORED_REDUCTION_INC            0x00000006
#define NVC06F_SEMAPHORED_REDUCTION_DEC            0x00000007
#define NVC06F_SEMAPHORED_FORMAT                   31:31
#define NVC06F_SEMAPHORED_FORMAT_SIGNED            0x00000000
#define NVC06F_SEMAPHORED_FORMAT_UNSIGNED          0x00000001
#define NVC06F_NON_STALL_INTERRUPT                 (0x00000020)
#define NVC06F_NON_STALL_INTERRUPT_HANDLE          31:0
#define NVC06F_FB_FLUSH                            (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR
#define NVC06F_FB_FLUSH_HANDLE                     31:0
// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for
// specifying the page address for a targeted TLB invalidate and the uTLB for
// a targeted REPLAY_CANCEL for UVM.
@@ -153,19 +169,142 @@ extern "C" {
#define NVC06F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d
#define NVC06F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e
// CLEAN_LINES is an alias for Tegra/GPU IP usage
#define NVC06F_MEM_OP_D_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e
// This B alias is confusing but it was missed as part of the update. Left here
// for compatibility.
#define NVC06F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e
#define NVC06F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f
#define NVC06F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY   0x00000010
#define NVC06F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015
#define NVC06F_SET_REFERENCE                       (0x00000050)
#define NVC06F_SET_REFERENCE_COUNT                 31:0

// Syncpoint methods are only available on Tegra parts. Attempting to use
// them on discrete GPUs will result in Host raising NV_PPBDMA_INTR_0_METHOD.
#define NVC06F_SYNCPOINTA                          (0x00000070)
#define NVC06F_SYNCPOINTA_PAYLOAD                  31:0
#define NVC06F_SYNCPOINTB                          (0x00000074)
#define NVC06F_SYNCPOINTB_OPERATION                0:0
#define NVC06F_SYNCPOINTB_OPERATION_WAIT           0x00000000
#define NVC06F_SYNCPOINTB_OPERATION_INCR           0x00000001
#define NVC06F_SYNCPOINTB_WAIT_SWITCH              4:4
#define NVC06F_SYNCPOINTB_WAIT_SWITCH_DIS          0x00000000
#define NVC06F_SYNCPOINTB_WAIT_SWITCH_EN           0x00000001
#define NVC06F_SYNCPOINTB_SYNCPT_INDEX             19:8
#define NVC06F_WFI                                 (0x00000078)
#define NVC06F_WFI_SCOPE                           0:0
#define NVC06F_WFI_SCOPE_CURRENT_SCG_TYPE          0x00000000
#define NVC06F_WFI_SCOPE_ALL                       0x00000001
#define NVC06F_CRC_CHECK                           (0x0000007c)
#define NVC06F_CRC_CHECK_VALUE                     31:0
#define NVC06F_YIELD                               (0x00000080)
#define NVC06F_YIELD_OP                            1:0
#define NVC06F_YIELD_OP_NOP                        0x00000000
#define NVC06F_YIELD_OP_PBDMA_TIMESLICE            0x00000001
#define NVC06F_YIELD_OP_RUNLIST_TIMESLICE          0x00000002
#define NVC06F_YIELD_OP_TSG                        0x00000003


/* GPFIFO entry format */
#define NVC06F_GP_ENTRY__SIZE                      8
#define NVC06F_GP_ENTRY0_FETCH                     0:0
#define NVC06F_GP_ENTRY0_FETCH_UNCONDITIONAL       0x00000000
#define NVC06F_GP_ENTRY0_FETCH_CONDITIONAL         0x00000001
#define NVC06F_GP_ENTRY0_GET                       31:2
#define NVC06F_GP_ENTRY0_OPERAND                   31:0
#define NVC06F_GP_ENTRY1_GET_HI                    7:0
#define NVC06F_GP_ENTRY1_PRIV                      8:8
#define NVC06F_GP_ENTRY1_PRIV_USER                 0x00000000
#define NVC06F_GP_ENTRY1_PRIV_KERNEL               0x00000001
#define NVC06F_GP_ENTRY1_LEVEL                     9:9
#define NVC06F_GP_ENTRY1_LEVEL_MAIN                0x00000000
#define NVC06F_GP_ENTRY1_LEVEL_SUBROUTINE          0x00000001
#define NVC06F_GP_ENTRY1_LENGTH                    30:10
#define NVC06F_GP_ENTRY1_SYNC                      31:31
#define NVC06F_GP_ENTRY1_SYNC_PROCEED              0x00000000
#define NVC06F_GP_ENTRY1_SYNC_WAIT                 0x00000001
#define NVC06F_GP_ENTRY1_OPCODE                    7:0
#define NVC06F_GP_ENTRY1_OPCODE_NOP                0x00000000
#define NVC06F_GP_ENTRY1_OPCODE_ILLEGAL            0x00000001
#define NVC06F_GP_ENTRY1_OPCODE_GP_CRC             0x00000002
#define NVC06F_GP_ENTRY1_OPCODE_PB_CRC             0x00000003

/* dma method formats */
#define NVC06F_DMA_METHOD_ADDRESS_OLD              12:2
#define NVC06F_DMA_METHOD_ADDRESS                  11:0
#define NVC06F_DMA_SUBDEVICE_MASK                  15:4
#define NVC06F_DMA_METHOD_SUBCHANNEL               15:13
#define NVC06F_DMA_TERT_OP                         17:16
#define NVC06F_DMA_TERT_OP_GRP0_INC_METHOD         (0x00000000)
#define NVC06F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK   (0x00000001)
#define NVC06F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002)
#define NVC06F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK   (0x00000003)
#define NVC06F_DMA_TERT_OP_GRP2_NON_INC_METHOD     (0x00000000)
#define NVC06F_DMA_METHOD_COUNT_OLD                28:18
#define NVC06F_DMA_METHOD_COUNT                    28:16
#define NVC06F_DMA_IMMD_DATA                       28:16
#define NVC06F_DMA_SEC_OP                          31:29
#define NVC06F_DMA_SEC_OP_GRP0_USE_TERT            (0x00000000)
#define NVC06F_DMA_SEC_OP_INC_METHOD               (0x00000001)
#define NVC06F_DMA_SEC_OP_GRP2_USE_TERT            (0x00000002)
#define NVC06F_DMA_SEC_OP_NON_INC_METHOD           (0x00000003)
#define NVC06F_DMA_SEC_OP_IMMD_DATA_METHOD         (0x00000004)
#define NVC06F_DMA_SEC_OP_ONE_INC                  (0x00000005)
#define NVC06F_DMA_SEC_OP_RESERVED6                (0x00000006)
#define NVC06F_DMA_SEC_OP_END_PB_SEGMENT           (0x00000007)
/* dma incrementing method format */
#define NVC06F_DMA_INCR_ADDRESS                    11:0
#define NVC06F_DMA_INCR_SUBCHANNEL                 15:13
#define NVC06F_DMA_INCR_COUNT                      28:16
#define NVC06F_DMA_INCR_OPCODE                     31:29
#define NVC06F_DMA_INCR_OPCODE_VALUE               (0x00000001)
#define NVC06F_DMA_INCR_DATA                       31:0
/* dma non-incrementing method format */
#define NVC06F_DMA_NONINCR_ADDRESS                 11:0
#define NVC06F_DMA_NONINCR_SUBCHANNEL              15:13
#define NVC06F_DMA_NONINCR_COUNT                   28:16
#define NVC06F_DMA_NONINCR_OPCODE                  31:29
#define NVC06F_DMA_NONINCR_OPCODE_VALUE            (0x00000003)
#define NVC06F_DMA_NONINCR_DATA                    31:0
/* dma increment-once method format */
#define NVC06F_DMA_ONEINCR_ADDRESS                 11:0
#define NVC06F_DMA_ONEINCR_SUBCHANNEL              15:13
#define NVC06F_DMA_ONEINCR_COUNT                   28:16
#define NVC06F_DMA_ONEINCR_OPCODE                  31:29
#define NVC06F_DMA_ONEINCR_OPCODE_VALUE            (0x00000005)
#define NVC06F_DMA_ONEINCR_DATA                    31:0
/* dma no-operation format */
#define NVC06F_DMA_NOP                             (0x00000000)
/* dma immediate-data format */
#define NVC06F_DMA_IMMD_ADDRESS                    11:0
#define NVC06F_DMA_IMMD_SUBCHANNEL                 15:13
#define NVC06F_DMA_IMMD_DATA                       28:16
#define NVC06F_DMA_IMMD_OPCODE                     31:29
#define NVC06F_DMA_IMMD_OPCODE_VALUE               (0x00000004)
/* dma set sub-device mask format */
#define NVC06F_DMA_SET_SUBDEVICE_MASK_VALUE        15:4
#define NVC06F_DMA_SET_SUBDEVICE_MASK_OPCODE       31:16
#define NVC06F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001)
/* dma store sub-device mask format */
#define NVC06F_DMA_STORE_SUBDEVICE_MASK_VALUE      15:4
#define NVC06F_DMA_STORE_SUBDEVICE_MASK_OPCODE     31:16
#define NVC06F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002)
/* dma use sub-device mask format */
#define NVC06F_DMA_USE_SUBDEVICE_MASK_OPCODE       31:16
#define NVC06F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003)
/* dma end-segment format */
#define NVC06F_DMA_ENDSEG_OPCODE                   31:29
#define NVC06F_DMA_ENDSEG_OPCODE_VALUE             (0x00000007)
/* dma legacy incrementing/non-incrementing formats */
#define NVC06F_DMA_ADDRESS                         12:2
#define NVC06F_DMA_SUBCH                           15:13
#define NVC06F_DMA_OPCODE3                         17:16
#define NVC06F_DMA_OPCODE3_NONE                    (0x00000000)
#define NVC06F_DMA_COUNT                           28:18
#define NVC06F_DMA_OPCODE                          31:29
#define NVC06F_DMA_OPCODE_METHOD                   (0x00000000)
#define NVC06F_DMA_OPCODE_NONINC_METHOD            (0x00000002)
#define NVC06F_DMA_DATA                            31:0

#ifdef __cplusplus
};     /* extern "C" */
#endif
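The Nvc06fControl layout above is the channel's USERD window: software appends GPFIFO entries and then advances GPPut, while Host advances GPGet as it consumes them. A sketch of the submission step, assuming ctrl is a mapping of USERD and using the Linux wmb() barrier to order the entry writes ahead of the pointer update:

static void kick_gpfifo(Nvc06fControl *ctrl, NvU32 new_gp_put)
{
    wmb();                    /* entries must be visible before Put moves  */
    ctrl->GPPut = new_gp_put; /* Host starts fetching once GPPut != GPGet */
}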
@@ -1,19 +1,19 @@
/*******************************************************************************
    Copyright (c) 2014 NVIDIA Corporation
    Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:
    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.
    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
@@ -32,6 +32,10 @@ extern "C" {

#define PASCAL_DMA_COPY_A                                                               (0x0000C0B5)

#define NVC0B5_NOP                                                              (0x00000100)
#define NVC0B5_NOP_PARAMETER                                                    31:0
#define NVC0B5_PM_TRIGGER                                                       (0x00000140)
#define NVC0B5_PM_TRIGGER_V                                                     31:0
#define NVC0B5_SET_SEMAPHORE_A                                                  (0x00000240)
#define NVC0B5_SET_SEMAPHORE_A_UPPER                                            16:0
#define NVC0B5_SET_SEMAPHORE_B                                                  (0x00000244)
@@ -115,6 +119,10 @@ extern "C" {
#define NVC0B5_LAUNCH_DMA_SRC_BYPASS_L2                                         20:20
#define NVC0B5_LAUNCH_DMA_SRC_BYPASS_L2_USE_PTE_SETTING                         (0x00000000)
#define NVC0B5_LAUNCH_DMA_SRC_BYPASS_L2_FORCE_VOLATILE                          (0x00000001)
#define NVC0B5_LAUNCH_DMA_DST_BYPASS_L2                                         21:21
#define NVC0B5_LAUNCH_DMA_DST_BYPASS_L2_USE_PTE_SETTING                         (0x00000000)
#define NVC0B5_LAUNCH_DMA_DST_BYPASS_L2_FORCE_VOLATILE                          (0x00000001)
#define NVC0B5_LAUNCH_DMA_RESERVED                                              31:28
#define NVC0B5_OFFSET_IN_UPPER                                                  (0x00000400)
#define NVC0B5_OFFSET_IN_UPPER_UPPER                                            16:0
#define NVC0B5_OFFSET_IN_LOWER                                                  (0x00000404)
@@ -183,6 +191,68 @@ extern "C" {
#define NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO                      (0x00000001)
#define NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE                    (0x00000002)
#define NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR                     (0x00000003)
#define NVC0B5_SET_DST_BLOCK_SIZE                                               (0x0000070C)
#define NVC0B5_SET_DST_BLOCK_SIZE_WIDTH                                         3:0
#define NVC0B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB                                 (0x00000000)
#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT                                        7:4
#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB                                (0x00000000)
#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS                               (0x00000001)
#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS                              (0x00000002)
#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS                             (0x00000003)
#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS                           (0x00000004)
#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS                         (0x00000005)
#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH                                         11:8
#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB                                 (0x00000000)
#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS                                (0x00000001)
#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS                               (0x00000002)
#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS                              (0x00000003)
#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS                            (0x00000004)
#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS                          (0x00000005)
#define NVC0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT                                    15:12
#define NVC0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8                 (0x00000001)
#define NVC0B5_SET_DST_WIDTH                                                    (0x00000710)
#define NVC0B5_SET_DST_WIDTH_V                                                  31:0
#define NVC0B5_SET_DST_HEIGHT                                                   (0x00000714)
#define NVC0B5_SET_DST_HEIGHT_V                                                 31:0
#define NVC0B5_SET_DST_DEPTH                                                    (0x00000718)
#define NVC0B5_SET_DST_DEPTH_V                                                  31:0
#define NVC0B5_SET_DST_LAYER                                                    (0x0000071C)
#define NVC0B5_SET_DST_LAYER_V                                                  31:0
#define NVC0B5_SET_DST_ORIGIN                                                   (0x00000720)
#define NVC0B5_SET_DST_ORIGIN_X                                                 15:0
#define NVC0B5_SET_DST_ORIGIN_Y                                                 31:16
#define NVC0B5_SET_SRC_BLOCK_SIZE                                               (0x00000728)
#define NVC0B5_SET_SRC_BLOCK_SIZE_WIDTH                                         3:0
#define NVC0B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB                                 (0x00000000)
#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT                                        7:4
#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB                                (0x00000000)
#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS                               (0x00000001)
#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS                              (0x00000002)
#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS                             (0x00000003)
#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS                           (0x00000004)
#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS                         (0x00000005)
#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH                                         11:8
#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB                                 (0x00000000)
#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS                                (0x00000001)
#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS                               (0x00000002)
#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS                              (0x00000003)
#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS                            (0x00000004)
#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS                          (0x00000005)
#define NVC0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT                                    15:12
#define NVC0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8                 (0x00000001)
#define NVC0B5_SET_SRC_WIDTH                                                    (0x0000072C)
#define NVC0B5_SET_SRC_WIDTH_V                                                  31:0
#define NVC0B5_SET_SRC_HEIGHT                                                   (0x00000730)
#define NVC0B5_SET_SRC_HEIGHT_V                                                 31:0
#define NVC0B5_SET_SRC_DEPTH                                                    (0x00000734)
#define NVC0B5_SET_SRC_DEPTH_V                                                  31:0
#define NVC0B5_SET_SRC_LAYER                                                    (0x00000738)
#define NVC0B5_SET_SRC_LAYER_V                                                  31:0
#define NVC0B5_SET_SRC_ORIGIN                                                   (0x0000073C)
#define NVC0B5_SET_SRC_ORIGIN_X                                                 15:0
#define NVC0B5_SET_SRC_ORIGIN_Y                                                 31:16
#define NVC0B5_PM_TRIGGER_END                                                   (0x00001114)
#define NVC0B5_PM_TRIGGER_END_V                                                 31:0

#ifdef __cplusplus
};     /* extern "C" */
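Copy offsets on this class are split across an UPPER/LOWER method pair; with the 16:0 upper field above, that is 49 bits of virtual address. A sketch with the same hypothetical helpers as before:

push_method(pb, ce_subch, 0x0400 /* NVC0B5_OFFSET_IN_UPPER */,
            (NvU32)(src_va >> 32));  /* VA bits 48:32 */
push_method(pb, ce_subch, 0x0404 /* NVC0B5_OFFSET_IN_LOWER */,
            (NvU32)src_va);          /* VA bits 31:0  */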
@@ -1,19 +1,19 @@
/*******************************************************************************
    Copyright (c) 2014 NVIDIA Corporation
    Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:
    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.
    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
@@ -32,6 +32,10 @@ extern "C" {

#define PASCAL_DMA_COPY_B                                             (0x0000C1B5)

#define NVC1B5_NOP                                                    (0x00000100)
#define NVC1B5_NOP_PARAMETER                                          31:0
#define NVC1B5_PM_TRIGGER                                             (0x00000140)
#define NVC1B5_PM_TRIGGER_V                                           31:0
#define NVC1B5_SET_SEMAPHORE_A                                        (0x00000240)
#define NVC1B5_SET_SEMAPHORE_A_UPPER                                  16:0
#define NVC1B5_SET_SEMAPHORE_B                                        (0x00000244)
@@ -115,6 +119,14 @@ extern "C" {
#define NVC1B5_LAUNCH_DMA_SRC_BYPASS_L2                               20:20
#define NVC1B5_LAUNCH_DMA_SRC_BYPASS_L2_USE_PTE_SETTING               (0x00000000)
#define NVC1B5_LAUNCH_DMA_SRC_BYPASS_L2_FORCE_VOLATILE                (0x00000001)
#define NVC1B5_LAUNCH_DMA_DST_BYPASS_L2                               21:21
#define NVC1B5_LAUNCH_DMA_DST_BYPASS_L2_USE_PTE_SETTING               (0x00000000)
#define NVC1B5_LAUNCH_DMA_DST_BYPASS_L2_FORCE_VOLATILE                (0x00000001)
#define NVC1B5_LAUNCH_DMA_VPRMODE                                     23:22
#define NVC1B5_LAUNCH_DMA_VPRMODE_VPR_NONE                            (0x00000000)
#define NVC1B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID                         (0x00000001)
#define NVC1B5_LAUNCH_DMA_RESERVED_START_OF_COPY                      24:24
#define NVC1B5_LAUNCH_DMA_RESERVED_ERR_CODE                           31:28
#define NVC1B5_OFFSET_IN_UPPER                                        (0x00000400)
#define NVC1B5_OFFSET_IN_UPPER_UPPER                                  16:0
#define NVC1B5_OFFSET_IN_LOWER                                        (0x00000404)
@@ -183,6 +195,76 @@ extern "C" {
#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO            (0x00000001)
#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE          (0x00000002)
#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR           (0x00000003)
#define NVC1B5_SET_DST_BLOCK_SIZE                                     (0x0000070C)
#define NVC1B5_SET_DST_BLOCK_SIZE_WIDTH                               3:0
#define NVC1B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB                       (0x00000000)
#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT                              7:4
#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB                      (0x00000000)
#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS                     (0x00000001)
#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS                    (0x00000002)
#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS                   (0x00000003)
#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS                 (0x00000004)
#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS               (0x00000005)
#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH                               11:8
#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB                       (0x00000000)
#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS                      (0x00000001)
#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS                     (0x00000002)
#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS                    (0x00000003)
#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS                  (0x00000004)
#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS                (0x00000005)
#define NVC1B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT                          15:12
#define NVC1B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8       (0x00000001)
#define NVC1B5_SET_DST_WIDTH                                          (0x00000710)
#define NVC1B5_SET_DST_WIDTH_V                                        31:0
#define NVC1B5_SET_DST_HEIGHT                                         (0x00000714)
#define NVC1B5_SET_DST_HEIGHT_V                                       31:0
#define NVC1B5_SET_DST_DEPTH                                          (0x00000718)
#define NVC1B5_SET_DST_DEPTH_V                                        31:0
#define NVC1B5_SET_DST_LAYER                                          (0x0000071C)
#define NVC1B5_SET_DST_LAYER_V                                        31:0
#define NVC1B5_SET_DST_ORIGIN                                         (0x00000720)
#define NVC1B5_SET_DST_ORIGIN_X                                       15:0
#define NVC1B5_SET_DST_ORIGIN_Y                                       31:16
#define NVC1B5_SET_SRC_BLOCK_SIZE                                     (0x00000728)
#define NVC1B5_SET_SRC_BLOCK_SIZE_WIDTH                               3:0
#define NVC1B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB                       (0x00000000)
#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT                              7:4
#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB                      (0x00000000)
#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS                     (0x00000001)
#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS                    (0x00000002)
#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS                   (0x00000003)
#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS                 (0x00000004)
#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS               (0x00000005)
#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH                               11:8
#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB                       (0x00000000)
#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS                      (0x00000001)
#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS                     (0x00000002)
#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS                    (0x00000003)
#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS                  (0x00000004)
#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS                (0x00000005)
#define NVC1B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT                          15:12
#define NVC1B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8       (0x00000001)
#define NVC1B5_SET_SRC_WIDTH                                          (0x0000072C)
#define NVC1B5_SET_SRC_WIDTH_V                                        31:0
#define NVC1B5_SET_SRC_HEIGHT                                         (0x00000730)
#define NVC1B5_SET_SRC_HEIGHT_V                                       31:0
#define NVC1B5_SET_SRC_DEPTH                                          (0x00000734)
#define NVC1B5_SET_SRC_DEPTH_V                                        31:0
#define NVC1B5_SET_SRC_LAYER                                          (0x00000738)
#define NVC1B5_SET_SRC_LAYER_V                                        31:0
#define NVC1B5_SET_SRC_ORIGIN                                         (0x0000073C)
#define NVC1B5_SET_SRC_ORIGIN_X                                       15:0
#define NVC1B5_SET_SRC_ORIGIN_Y                                       31:16
#define NVC1B5_SRC_ORIGIN_X                                           (0x00000744)
#define NVC1B5_SRC_ORIGIN_X_VALUE                                     31:0
#define NVC1B5_SRC_ORIGIN_Y                                           (0x00000748)
#define NVC1B5_SRC_ORIGIN_Y_VALUE                                     31:0
#define NVC1B5_DST_ORIGIN_X                                           (0x0000074C)
#define NVC1B5_DST_ORIGIN_X_VALUE                                     31:0
#define NVC1B5_DST_ORIGIN_Y                                           (0x00000750)
#define NVC1B5_DST_ORIGIN_Y_VALUE                                     31:0
#define NVC1B5_PM_TRIGGER_END                                         (0x00001114)
#define NVC1B5_PM_TRIGGER_END_V                                       31:0

#ifdef __cplusplus
};     /* extern "C" */

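The SET_SEMAPHORE_A/B pair above splits a semaphore address across two methods: _A_UPPER is a 17-bit field (16:0) holding address bits 48:32, and _B takes the low 32 bits. A minimal sketch of the split, assuming a generic push(method, data) abstraction rather than any real pushbuffer API:

#include <stdint.h>

typedef void (*push_fn)(uint32_t method, uint32_t data);

static void push_semaphore_address(push_fn push, uint64_t semaphore_va)
{
    push(0x00000240, (uint32_t)(semaphore_va >> 32) & 0x1ffff); /* NVC1B5_SET_SEMAPHORE_A */
    push(0x00000244, (uint32_t)semaphore_va);                   /* NVC1B5_SET_SEMAPHORE_B */
}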
@@ -1,5 +1,5 @@
/*******************************************************************************
    Copyright (c) 2021 NVIDIA Corporation
    Copyright (c) 2023 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to

@@ -1,5 +1,5 @@
/*******************************************************************************
    Copyright (c) 2021 NVIDIA Corporation
    Copyright (c) 2023 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to

@@ -1,19 +1,19 @@
/*******************************************************************************
    Copyright (c) 2016 NVIDIA Corporation
    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:
    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.
    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
@@ -32,6 +32,10 @@ extern "C" {

#define VOLTA_DMA_COPY_A                                              (0x0000C3B5)

#define NVC3B5_NOP                                                    (0x00000100)
#define NVC3B5_NOP_PARAMETER                                          31:0
#define NVC3B5_PM_TRIGGER                                             (0x00000140)
#define NVC3B5_PM_TRIGGER_V                                           31:0
#define NVC3B5_SET_SEMAPHORE_A                                        (0x00000240)
#define NVC3B5_SET_SEMAPHORE_A_UPPER                                  16:0
#define NVC3B5_SET_SEMAPHORE_B                                        (0x00000244)
@@ -69,6 +73,9 @@ extern "C" {
#define NVC3B5_LAUNCH_DMA_FLUSH_ENABLE                                2:2
#define NVC3B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE                          (0x00000000)
#define NVC3B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE                           (0x00000001)
#define NVC3B5_LAUNCH_DMA_FLUSH_TYPE                                  25:25
#define NVC3B5_LAUNCH_DMA_FLUSH_TYPE_SYS                              (0x00000000)
#define NVC3B5_LAUNCH_DMA_FLUSH_TYPE_GL                               (0x00000001)
#define NVC3B5_LAUNCH_DMA_SEMAPHORE_TYPE                              4:3
#define NVC3B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE                         (0x00000000)
#define NVC3B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE   (0x00000001)
@@ -123,8 +130,6 @@ extern "C" {
#define NVC3B5_LAUNCH_DMA_VPRMODE                                     23:22
#define NVC3B5_LAUNCH_DMA_VPRMODE_VPR_NONE                            (0x00000000)
#define NVC3B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID                         (0x00000001)
#define NVC3B5_LAUNCH_DMA_VPRMODE_VPR_VID2SYS                         (0x00000002)
#define NVC3B5_LAUNCH_DMA_VPRMODE_VPR_SYS2VID                         (0x00000003)
#define NVC3B5_LAUNCH_DMA_RESERVED_START_OF_COPY                      24:24
#define NVC3B5_LAUNCH_DMA_RESERVED_ERR_CODE                           31:28
#define NVC3B5_OFFSET_IN_UPPER                                        (0x00000400)
@@ -195,6 +200,76 @@ extern "C" {
#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO            (0x00000001)
#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE          (0x00000002)
#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR           (0x00000003)
#define NVC3B5_SET_DST_BLOCK_SIZE                                     (0x0000070C)
#define NVC3B5_SET_DST_BLOCK_SIZE_WIDTH                               3:0
#define NVC3B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB                       (0x00000000)
#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT                              7:4
#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB                      (0x00000000)
#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS                     (0x00000001)
#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS                    (0x00000002)
#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS                   (0x00000003)
#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS                 (0x00000004)
#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS               (0x00000005)
#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH                               11:8
#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB                       (0x00000000)
#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS                      (0x00000001)
#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS                     (0x00000002)
#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS                    (0x00000003)
#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS                  (0x00000004)
#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS                (0x00000005)
#define NVC3B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT                          15:12
#define NVC3B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8       (0x00000001)
#define NVC3B5_SET_DST_WIDTH                                          (0x00000710)
#define NVC3B5_SET_DST_WIDTH_V                                        31:0
#define NVC3B5_SET_DST_HEIGHT                                         (0x00000714)
#define NVC3B5_SET_DST_HEIGHT_V                                       31:0
#define NVC3B5_SET_DST_DEPTH                                          (0x00000718)
#define NVC3B5_SET_DST_DEPTH_V                                        31:0
#define NVC3B5_SET_DST_LAYER                                          (0x0000071C)
#define NVC3B5_SET_DST_LAYER_V                                        31:0
#define NVC3B5_SET_DST_ORIGIN                                         (0x00000720)
#define NVC3B5_SET_DST_ORIGIN_X                                       15:0
#define NVC3B5_SET_DST_ORIGIN_Y                                       31:16
#define NVC3B5_SET_SRC_BLOCK_SIZE                                     (0x00000728)
#define NVC3B5_SET_SRC_BLOCK_SIZE_WIDTH                               3:0
#define NVC3B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB                       (0x00000000)
#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT                              7:4
#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB                      (0x00000000)
#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS                     (0x00000001)
#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS                    (0x00000002)
#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS                   (0x00000003)
#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS                 (0x00000004)
#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS               (0x00000005)
#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH                               11:8
#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB                       (0x00000000)
#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS                      (0x00000001)
#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS                     (0x00000002)
#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS                    (0x00000003)
#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS                  (0x00000004)
#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS                (0x00000005)
#define NVC3B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT                          15:12
#define NVC3B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8       (0x00000001)
#define NVC3B5_SET_SRC_WIDTH                                          (0x0000072C)
#define NVC3B5_SET_SRC_WIDTH_V                                        31:0
#define NVC3B5_SET_SRC_HEIGHT                                         (0x00000730)
#define NVC3B5_SET_SRC_HEIGHT_V                                       31:0
#define NVC3B5_SET_SRC_DEPTH                                          (0x00000734)
#define NVC3B5_SET_SRC_DEPTH_V                                        31:0
#define NVC3B5_SET_SRC_LAYER                                          (0x00000738)
#define NVC3B5_SET_SRC_LAYER_V                                        31:0
#define NVC3B5_SET_SRC_ORIGIN                                         (0x0000073C)
#define NVC3B5_SET_SRC_ORIGIN_X                                       15:0
#define NVC3B5_SET_SRC_ORIGIN_Y                                       31:16
#define NVC3B5_SRC_ORIGIN_X                                           (0x00000744)
#define NVC3B5_SRC_ORIGIN_X_VALUE                                     31:0
#define NVC3B5_SRC_ORIGIN_Y                                           (0x00000748)
#define NVC3B5_SRC_ORIGIN_Y_VALUE                                     31:0
#define NVC3B5_DST_ORIGIN_X                                           (0x0000074C)
#define NVC3B5_DST_ORIGIN_X_VALUE                                     31:0
#define NVC3B5_DST_ORIGIN_Y                                           (0x00000750)
#define NVC3B5_DST_ORIGIN_Y_VALUE                                     31:0
#define NVC3B5_PM_TRIGGER_END                                         (0x00001114)
#define NVC3B5_PM_TRIGGER_END_V                                       31:0

#ifdef __cplusplus
};     /* extern "C" */

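The hunks above add FLUSH_TYPE (bit 25) next to the existing FLUSH_ENABLE (bit 2) and SEMAPHORE_TYPE (bits 4:3) fields of LAUNCH_DMA. A sketch of composing just those three fields into a launch word; a real launch programs many more fields than shown:

#include <stdint.h>

static uint32_t nvc3b5_launch_dma_example(void)
{
    uint32_t launch = 0;
    launch |= 0x00000001u << 2;   /* FLUSH_ENABLE_TRUE                         */
    launch |= 0x00000000u << 25;  /* FLUSH_TYPE_SYS (system-scope flush)       */
    launch |= 0x00000001u << 3;   /* SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE */
    return launch;
}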
kernel-open/nvidia-uvm/clcba2.h (new file, 97 lines)
@@ -0,0 +1,97 @@
/*******************************************************************************
    Copyright (c) 2021-2022 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/

#include "nvtypes.h"

#ifndef _clcba2_h_
#define _clcba2_h_

#ifdef __cplusplus
extern "C" {
#endif

#define HOPPER_SEC2_WORK_LAUNCH_A                                     (0x0000CBA2)

#define NVCBA2_DECRYPT_COPY_SRC_ADDR_HI                               (0x00000400)
#define NVCBA2_DECRYPT_COPY_SRC_ADDR_HI_DATA                          24:0
#define NVCBA2_DECRYPT_COPY_SRC_ADDR_LO                               (0x00000404)
#define NVCBA2_DECRYPT_COPY_SRC_ADDR_LO_DATA                          31:4
#define NVCBA2_DECRYPT_COPY_DST_ADDR_HI                               (0x00000408)
#define NVCBA2_DECRYPT_COPY_DST_ADDR_HI_DATA                          24:0
#define NVCBA2_DECRYPT_COPY_DST_ADDR_LO                               (0x0000040c)
#define NVCBA2_DECRYPT_COPY_DST_ADDR_LO_DATA                          31:4
#define NVCBA2_DECRYPT_COPY_SIZE                                      (0x00000410)
#define NVCBA2_DECRYPT_COPY_SIZE_DATA                                 31:2
#define NVCBA2_DECRYPT_COPY_AUTH_TAG_ADDR_HI                          (0x00000414)
#define NVCBA2_DECRYPT_COPY_AUTH_TAG_ADDR_HI_DATA                     24:0
#define NVCBA2_DECRYPT_COPY_AUTH_TAG_ADDR_LO                          (0x00000418)
#define NVCBA2_DECRYPT_COPY_AUTH_TAG_ADDR_LO_DATA                     31:4
#define NVCBA2_METHOD_STREAM_AUTH_TAG_ADDR_HI                         (0x0000041C)
#define NVCBA2_METHOD_STREAM_AUTH_TAG_ADDR_HI_DATA                    24:0
#define NVCBA2_METHOD_STREAM_AUTH_TAG_ADDR_LO                         (0x00000420)
#define NVCBA2_METHOD_STREAM_AUTH_TAG_ADDR_LO_DATA                    31:4
#define NVCBA2_SEMAPHORE_A                                            (0x00000440)
#define NVCBA2_SEMAPHORE_A_UPPER                                      24:0
#define NVCBA2_SEMAPHORE_B                                            (0x00000444)
#define NVCBA2_SEMAPHORE_B_LOWER                                      31:2
#define NVCBA2_SET_SEMAPHORE_PAYLOAD_LOWER                            (0x00000448)
#define NVCBA2_SET_SEMAPHORE_PAYLOAD_LOWER_DATA                       31:0
#define NVCBA2_SET_SEMAPHORE_PAYLOAD_UPPER                            (0x0000044C)
#define NVCBA2_SET_SEMAPHORE_PAYLOAD_UPPER_DATA                       31:0
#define NVCBA2_SEMAPHORE_D                                            (0x00000450)
#define NVCBA2_SEMAPHORE_D_NOTIFY_INTR                                0:0
#define NVCBA2_SEMAPHORE_D_NOTIFY_INTR_DISABLE                        (0x00000000)
#define NVCBA2_SEMAPHORE_D_NOTIFY_INTR_ENABLE                         (0x00000001)
#define NVCBA2_SEMAPHORE_D_PAYLOAD_SIZE                               1:1
#define NVCBA2_SEMAPHORE_D_PAYLOAD_SIZE_32_BIT                        (0x00000000)
#define NVCBA2_SEMAPHORE_D_PAYLOAD_SIZE_64_BIT                        (0x00000001)
#define NVCBA2_SEMAPHORE_D_TIMESTAMP                                  2:2
#define NVCBA2_SEMAPHORE_D_TIMESTAMP_DISABLE                          (0x00000000)
#define NVCBA2_SEMAPHORE_D_TIMESTAMP_ENABLE                           (0x00000001)
#define NVCBA2_SEMAPHORE_D_FLUSH_DISABLE                              3:3
#define NVCBA2_SEMAPHORE_D_FLUSH_DISABLE_FALSE                        (0x00000000)
#define NVCBA2_SEMAPHORE_D_FLUSH_DISABLE_TRUE                         (0x00000001)
#define NVCBA2_EXECUTE                                                (0x00000470)
#define NVCBA2_EXECUTE_NOTIFY                                         0:0
#define NVCBA2_EXECUTE_NOTIFY_DISABLE                                 (0x00000000)
#define NVCBA2_EXECUTE_NOTIFY_ENABLE                                  (0x00000001)
#define NVCBA2_EXECUTE_NOTIFY_ON                                      1:1
#define NVCBA2_EXECUTE_NOTIFY_ON_END                                  (0x00000000)
#define NVCBA2_EXECUTE_NOTIFY_ON_BEGIN                                (0x00000001)
#define NVCBA2_EXECUTE_FLUSH_DISABLE                                  2:2
#define NVCBA2_EXECUTE_FLUSH_DISABLE_FALSE                            (0x00000000)
#define NVCBA2_EXECUTE_FLUSH_DISABLE_TRUE                             (0x00000001)
#define NVCBA2_EXECUTE_NOTIFY_INTR                                    3:3
#define NVCBA2_EXECUTE_NOTIFY_INTR_DISABLE                            (0x00000000)
#define NVCBA2_EXECUTE_NOTIFY_INTR_ENABLE                             (0x00000001)
#define NVCBA2_EXECUTE_PAYLOAD_SIZE                                   4:4
#define NVCBA2_EXECUTE_PAYLOAD_SIZE_32_BIT                            (0x00000000)
#define NVCBA2_EXECUTE_PAYLOAD_SIZE_64_BIT                            (0x00000001)
#define NVCBA2_EXECUTE_TIMESTAMP                                      5:5
#define NVCBA2_EXECUTE_TIMESTAMP_DISABLE                              (0x00000000)
#define NVCBA2_EXECUTE_TIMESTAMP_ENABLE                               (0x00000001)

#ifdef __cplusplus
};     /* extern "C" */
#endif
#endif // _clcba2_h
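In this new class, each 64-bit address is split into an _ADDR_HI/_ADDR_LO method pair: _LO_DATA occupies bits 31:4 (so buffers must be 16-byte aligned) and _HI_DATA is 24:0, for 57 addressable bits in total. A sketch of the split, assuming the LO field carries address bits 31:4 in place:

#include <assert.h>
#include <stdint.h>

static void nvcba2_split_addr(uint64_t va, uint32_t *hi, uint32_t *lo)
{
    assert((va & 0xF) == 0);                   /* bits 3:0 are not representable */
    *lo = (uint32_t)(va & 0xFFFFFFF0u);        /* _LO_DATA, address bits 31:4    */
    *hi = (uint32_t)((va >> 32) & 0x1FFFFFFu); /* _HI_DATA, address bits 56:32   */
}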
@@ -1,5 +1,5 @@
/*******************************************************************************
    Copyright (c) 2013-2021 NVIDIA Corporation
    Copyright (c) 2013-2023 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to

@@ -1,5 +1,5 @@
/*******************************************************************************
    Copyright (c) 2016 NVIDIA Corporation
    Copyright (c) 2016-2024 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
@@ -81,7 +81,7 @@
#define NUM_Q_ITEMS_IN_MULTITHREAD_TEST (NUM_TEST_Q_ITEMS * NUM_TEST_KTHREADS)

// This exists in order to have a function to place a breakpoint on:
void on_nvq_assert(void)
static void on_nvq_assert(void)
{
    (void)NULL;
}

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -176,7 +176,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
{

    unsigned i, j;
    const static unsigned attempts = 3;
    static const unsigned attempts = 3;
    struct task_struct *thread[3];

    for (i = 0;; i++) {
@@ -201,7 +201,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),

        // Ran out of attempts - return thread even if its stack may not be
        // allocated on the preferred node
        if ((i == (attempts - 1)))
        if (i == (attempts - 1))
            break;

        // Get the NUMA node where the first page of the stack is resident. If
@@ -247,6 +247,11 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferr
    return 0;
}

int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
    return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}

// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
@@ -301,7 +306,7 @@ static void _q_flush_function(void *args)
static void _raw_q_flush(nv_kthread_q_t *q)
{
    nv_kthread_q_item_t q_item;
    DECLARE_COMPLETION(completion);
    DECLARE_COMPLETION_ONSTACK(completion);

    nv_kthread_q_item_init(&q_item, _q_flush_function, &completion);


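The DECLARE_COMPLETION_ONSTACK change matters because the completion in _raw_q_flush has automatic storage duration; the ONSTACK variant is the kernel's initializer for exactly that case (and keeps lockdep happy). The flush pattern itself, reduced to a sketch against the nv_kthread_q API shown above:

#include <linux/completion.h>
#include "nv-kthread-q.h"

// Runs on the queue's kthread; signals whoever is flushing.
static void flush_marker_fn(void *args)
{
    complete((struct completion *)args);
}

// Returns once every item queued before this call has been processed.
static void drain_queue(nv_kthread_q_t *q)
{
    nv_kthread_q_item_t item;
    DECLARE_COMPLETION_ONSTACK(done);

    nv_kthread_q_item_init(&item, flush_marker_fn, &done);
    nv_kthread_q_schedule_q_item(q, &item);
    wait_for_completion(&done);
}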
@@ -1,15 +1,13 @@
NVIDIA_UVM_SOURCES ?=
NVIDIA_UVM_SOURCES_CXX ?=

NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_ce.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ada.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats_sva.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_conf_computing.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_sec2_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_sec2.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_sec2.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_common.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_linux.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_debug_optimized.c
NVIDIA_UVM_SOURCES += nvidia-uvm/nvstatus.c
NVIDIA_UVM_SOURCES += nvidia-uvm/nvCpuUuid.c
NVIDIA_UVM_SOURCES += nvidia-uvm/nv-kthread-q.c
@@ -28,6 +26,7 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rm_mem.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_channel.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_lock.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hal.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_processors.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_tree.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rb_tree.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_allocator.c
@@ -58,6 +57,7 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pascal_ce.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pascal_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pascal_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pascal_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_ce.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta.c
@@ -72,6 +72,12 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere_ce.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_ce.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ada.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_policy.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_utils.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_kvmalloc.c
@@ -94,7 +100,6 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_test_rng.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_tree_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_allocator_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_gpu_semaphore_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hmm_sanity_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_mem_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rm_mem_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_page_tree_test.c

@@ -13,19 +13,6 @@ NVIDIA_UVM_OBJECTS =
include $(src)/nvidia-uvm/nvidia-uvm-sources.Kbuild
NVIDIA_UVM_OBJECTS += $(patsubst %.c,%.o,$(NVIDIA_UVM_SOURCES))

# Some linux kernel functions rely on being built with optimizations on and
# to work around this we put wrappers for them in a separate file that's built
# with optimizations on in debug builds and skipped in other builds.
# Notably gcc 4.4 supports per function optimization attributes that would be
# easier to use, but is too recent to rely on for now.
NVIDIA_UVM_DEBUG_OPTIMIZED_SOURCE := nvidia-uvm/uvm_debug_optimized.c
NVIDIA_UVM_DEBUG_OPTIMIZED_OBJECT := $(patsubst %.c,%.o,$(NVIDIA_UVM_DEBUG_OPTIMIZED_SOURCE))

ifneq ($(UVM_BUILD_TYPE),debug)
  # Only build the wrappers on debug builds
  NVIDIA_UVM_OBJECTS := $(filter-out $(NVIDIA_UVM_DEBUG_OPTIMIZED_OBJECT), $(NVIDIA_UVM_OBJECTS))
endif

obj-m += nvidia-uvm.o
nvidia-uvm-y := $(NVIDIA_UVM_OBJECTS)

@@ -36,15 +23,14 @@ NVIDIA_UVM_KO = nvidia-uvm/nvidia-uvm.ko
#

ifeq ($(UVM_BUILD_TYPE),debug)
  NVIDIA_UVM_CFLAGS += -DDEBUG $(call cc-option,-Og,-O0) -g
else
  ifeq ($(UVM_BUILD_TYPE),develop)
    # -DDEBUG is required, in order to allow pr_devel() print statements to
    # work:
    NVIDIA_UVM_CFLAGS += -DDEBUG
    NVIDIA_UVM_CFLAGS += -DNVIDIA_UVM_DEVELOP
  endif
  NVIDIA_UVM_CFLAGS += -O2
  NVIDIA_UVM_CFLAGS += -DDEBUG -g
endif

ifeq ($(UVM_BUILD_TYPE),develop)
  # -DDEBUG is required, in order to allow pr_devel() print statements to
  # work:
  NVIDIA_UVM_CFLAGS += -DDEBUG
  NVIDIA_UVM_CFLAGS += -DNVIDIA_UVM_DEVELOP
endif

NVIDIA_UVM_CFLAGS += -DNVIDIA_UVM_ENABLED
@@ -56,11 +42,6 @@ NVIDIA_UVM_CFLAGS += -I$(src)/nvidia-uvm

$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_UVM_OBJECTS), $(NVIDIA_UVM_CFLAGS))

ifeq ($(UVM_BUILD_TYPE),debug)
  # Force optimizations on for the wrappers
  $(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_UVM_DEBUG_OPTIMIZED_OBJECT), $(NVIDIA_UVM_CFLAGS) -O2)
endif

#
# Register the conftests needed by nvidia-uvm.ko
#
@@ -81,8 +62,14 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioasid_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += migrate_vma_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mm_pasid_drop
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mmget_not_zero
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mmgrab
NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_sva_bind_device_has_drvdata_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vm_fault_to_errno
NV_CONFTEST_FUNCTION_COMPILE_TESTS += find_next_bit_wrap
NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_is_dma_domain
NV_CONFTEST_FUNCTION_COMPILE_TESTS += folio_test_swapcache

NV_CONFTEST_TYPE_COMPILE_TESTS += backing_dev_info
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_context_t
@@ -96,10 +83,19 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += kmem_cache_has_kobj_remove_work
NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_notifier_ops_invalidate_range
NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_notifier_ops_arch_invalidate_secondary_tlbs
NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_vma_added_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += make_device_exclusive_range
NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_device_range
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_mm_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_pt_regs_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_unified_nodes
NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_home_node
NV_CONFTEST_TYPE_COMPILE_TESTS += mpol_preferred_many_present
NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_interval_notifier

NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_int_active_memcg
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_migrate_vma_setup

@@ -24,16 +24,17 @@
#include "nvstatus.h"

#if !defined(NV_PRINTF_STRING_SECTION)
#if defined(NVRM) && NVCPU_IS_RISCV64
#define NV_PRINTF_STRING_SECTION __attribute__ ((section (".logging")))
#else // defined(NVRM) && NVCPU_IS_RISCV64
#if defined(NVRM) && NVOS_IS_LIBOS
#include "libos_log.h"
#define NV_PRINTF_STRING_SECTION LIBOS_SECTION_LOGGING
#else // defined(NVRM) && NVOS_IS_LIBOS
#define NV_PRINTF_STRING_SECTION
#endif // defined(NVRM) && NVCPU_IS_RISCV64
#endif // defined(NVRM) && NVOS_IS_LIBOS
#endif // !defined(NV_PRINTF_STRING_SECTION)

/*
 * Include nvstatuscodes.h twice. Once for creating constant strings in the
 * the NV_PRINTF_STRING_SECTION section of the ececutable, and once to build
 * the NV_PRINTF_STRING_SECTION section of the executable, and once to build
 * the g_StatusCodeList table.
 */
#undef NV_STATUS_CODE

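The double include works because NV_STATUS_CODE is an X-macro: each expansion of the same list emits different code. In miniature, with an illustrative list standing in for nvstatuscodes.h:

#define DEMO_STATUS_LIST \
    NV_STATUS_CODE(DEMO_OK,      0x0000) \
    NV_STATUS_CODE(DEMO_GENERIC, 0xFFFF)

/* First expansion: one string constant per code. */
#define NV_STATUS_CODE(name, num) static const char name##_str[] = #name;
DEMO_STATUS_LIST
#undef NV_STATUS_CODE

/* Second expansion: the lookup table referencing those strings. */
#define NV_STATUS_CODE(name, num) { (num), name##_str },
static const struct { unsigned num; const char *str; } g_demoStatusList[] = {
    DEMO_STATUS_LIST
};
#undef NV_STATUS_CODE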
@@ -1,5 +1,5 @@
/*******************************************************************************
    Copyright (c) 2015-2022 NVIDIA Corporation
    Copyright (c) 2015-2023 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
@@ -28,6 +28,7 @@
#include "uvm_lock.h"
#include "uvm_test.h"
#include "uvm_va_space.h"
#include "uvm_va_space_mm.h"
#include "uvm_va_range.h"
#include "uvm_va_block.h"
#include "uvm_tools.h"
@@ -35,26 +36,200 @@
#include "uvm_linux_ioctl.h"
#include "uvm_hmm.h"
#include "uvm_mem.h"
#include "uvm_kvmalloc.h"

#define NVIDIA_UVM_DEVICE_NAME "nvidia-uvm"

static dev_t g_uvm_base_dev;
static struct cdev g_uvm_cdev;
static const struct file_operations uvm_fops;

static int uvm_open(struct inode *inode, struct file *filp)
bool uvm_file_is_nvidia_uvm(struct file *filp)
{
    NV_STATUS status = uvm_global_get_status();
    return (filp != NULL) && (filp->f_op == &uvm_fops);
}

    if (status == NV_OK) {
        if (!uvm_down_read_trylock(&g_uvm_global.pm.lock))
            return -EAGAIN;
uvm_fd_type_t uvm_fd_type(struct file *filp, void **ptr_val)
{
    unsigned long uptr;
    uvm_fd_type_t type;
    void *ptr;

        status = uvm_va_space_create(inode, filp);
    UVM_ASSERT(uvm_file_is_nvidia_uvm(filp));

        uvm_up_read(&g_uvm_global.pm.lock);
    uptr = atomic_long_read_acquire((atomic_long_t *) (&filp->private_data));
    type = (uvm_fd_type_t)(uptr & UVM_FD_TYPE_MASK);
    ptr = (void *)(uptr & ~UVM_FD_TYPE_MASK);
    BUILD_BUG_ON(UVM_FD_COUNT > UVM_FD_TYPE_MASK + 1);

    switch (type) {
        case UVM_FD_UNINITIALIZED:
        case UVM_FD_INITIALIZING:
            UVM_ASSERT(!ptr);
            break;

        case UVM_FD_VA_SPACE:
            UVM_ASSERT(ptr);
            BUILD_BUG_ON(__alignof__(uvm_va_space_t) < (1UL << UVM_FD_TYPE_BITS));
            break;

        case UVM_FD_MM:
            UVM_ASSERT(ptr);
            BUILD_BUG_ON(__alignof__(struct file) < (1UL << UVM_FD_TYPE_BITS));
            break;

        default:
            UVM_ASSERT(0);
    }

    return -nv_status_to_errno(status);
    if (ptr_val)
        *ptr_val = ptr;

    return type;
}

void *uvm_fd_get_type(struct file *filp, uvm_fd_type_t type)
{
    void *ptr;

    UVM_ASSERT(uvm_file_is_nvidia_uvm(filp));

    if (uvm_fd_type(filp, &ptr) == type)
        return ptr;
    else
        return NULL;
}

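uvm_fd_type decodes a tagged pointer: because the pointees are aligned to at least 1 << UVM_FD_TYPE_BITS (which the BUILD_BUG_ONs above enforce), the low bits of filp->private_data are free to carry the fd type. The scheme in isolation, with assumed values mirroring the driver's UVM_FD_TYPE_BITS/MASK:

#define FD_TYPE_BITS 2UL
#define FD_TYPE_MASK ((1UL << FD_TYPE_BITS) - 1)

static inline unsigned long fd_encode(void *ptr, unsigned long type)
{
    /* ptr's low FD_TYPE_BITS bits are zero by alignment, so OR is lossless. */
    return (unsigned long)ptr | (type & FD_TYPE_MASK);
}

static inline void *fd_decode_ptr(unsigned long word)
{
    return (void *)(word & ~FD_TYPE_MASK);
}

static inline unsigned long fd_decode_type(unsigned long word)
{
    return word & FD_TYPE_MASK;
}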
static NV_STATUS uvm_api_mm_initialize(UVM_MM_INITIALIZE_PARAMS *params, struct file *filp)
{
    uvm_va_space_t *va_space;
    uvm_va_space_mm_t *va_space_mm;
    struct file *uvm_file;
    uvm_fd_type_t old_fd_type;
    struct mm_struct *mm;
    NV_STATUS status;

    uvm_file = fget(params->uvmFd);
    if (!uvm_file_is_nvidia_uvm(uvm_file)) {
        status = NV_ERR_INVALID_ARGUMENT;
        goto err;
    }

    if (uvm_fd_type(uvm_file, (void **)&va_space) != UVM_FD_VA_SPACE) {
        status = NV_ERR_INVALID_ARGUMENT;
        goto err;
    }

    // Tell userspace the MM FD is not required and it may be released
    // with no loss of functionality.
    if (!uvm_va_space_mm_enabled(va_space)) {
        status = NV_WARN_NOTHING_TO_DO;
        goto err;
    }

    old_fd_type = atomic_long_cmpxchg((atomic_long_t *)&filp->private_data,
                                      UVM_FD_UNINITIALIZED,
                                      UVM_FD_INITIALIZING);
    old_fd_type &= UVM_FD_TYPE_MASK;
    if (old_fd_type != UVM_FD_UNINITIALIZED) {
        status = NV_ERR_IN_USE;
        goto err;
    }

    va_space_mm = &va_space->va_space_mm;
    uvm_spin_lock(&va_space_mm->lock);
    switch (va_space->va_space_mm.state) {
        // We only allow the va_space_mm to be initialised once. If
        // userspace passed the UVM FD to another process it is up to
        // userspace to ensure it also passes the UVM MM FD that
        // initialised the va_space_mm or arranges some other way to keep
        // a reference on the FD.
        case UVM_VA_SPACE_MM_STATE_ALIVE:
            status = NV_ERR_INVALID_STATE;
            goto err_release_unlock;
            break;

        // Once userspace has released the va_space_mm the GPU is
        // effectively dead and no new work can be started. We don't
        // support re-initializing once userspace has closed the FD.
        case UVM_VA_SPACE_MM_STATE_RELEASED:
            status = NV_ERR_PAGE_TABLE_NOT_AVAIL;
            goto err_release_unlock;
            break;

        // Keep the warnings at bay
        case UVM_VA_SPACE_MM_STATE_UNINITIALIZED:
            mm = va_space->va_space_mm.mm;
            if (!mm || !mmget_not_zero(mm)) {
                status = NV_ERR_PAGE_TABLE_NOT_AVAIL;
                goto err_release_unlock;
            }

            va_space_mm->state = UVM_VA_SPACE_MM_STATE_ALIVE;
            break;

        default:
            UVM_ASSERT(0);
            break;
    }
    uvm_spin_unlock(&va_space_mm->lock);
    atomic_long_set_release((atomic_long_t *)&filp->private_data, (long)uvm_file | UVM_FD_MM);

    return NV_OK;

err_release_unlock:
    uvm_spin_unlock(&va_space_mm->lock);
    atomic_long_set_release((atomic_long_t *)&filp->private_data, UVM_FD_UNINITIALIZED);

err:
    if (uvm_file)
        fput(uvm_file);

    return status;
}

// Called when opening /dev/nvidia-uvm. This code doesn't take any UVM locks, so
// there's no need to acquire g_uvm_global.pm.lock, but if that changes the PM
// lock will need to be taken.
static int uvm_open(struct inode *inode, struct file *filp)
{
    struct address_space *mapping;
    NV_STATUS status = uvm_global_get_status();

    if (status != NV_OK)
        return -nv_status_to_errno(status);

    mapping = uvm_kvmalloc(sizeof(*mapping));
    if (!mapping)
        return -ENOMEM;

    // By default all struct files on the same inode share the same
    // address_space structure (the inode's) across all processes. This means
    // unmap_mapping_range would unmap virtual mappings across all processes on
    // that inode.
    //
    // Since the UVM driver uses the mapping offset as the VA of the file's
    // process, we need to isolate the mappings to each process.
    address_space_init_once(mapping);
    mapping->host = inode;

    // Some paths in the kernel, for example force_page_cache_readahead which
    // can be invoked from user-space via madvise MADV_WILLNEED and fadvise
    // POSIX_FADV_WILLNEED, check the function pointers within
    // file->f_mapping->a_ops for validity. However, those paths assume that a_ops
    // itself is always valid. Handle that by using the inode's a_ops pointer,
    // which is what f_mapping->a_ops would point to anyway if we weren't re-
    // assigning f_mapping.
    mapping->a_ops = inode->i_mapping->a_ops;

#if defined(NV_ADDRESS_SPACE_HAS_BACKING_DEV_INFO)
    mapping->backing_dev_info = inode->i_mapping->backing_dev_info;
#endif

    filp->private_data = NULL;
    filp->f_mapping = mapping;

    return NV_OK;
}

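Giving each open file its own address_space is what keeps CPU-mapping revocation per-process: unmap_mapping_range operates on one address_space, so with a private f_mapping it can no longer hit every process that has the device node open. A kernel-side sketch of the call this isolates:

#include <linux/fs.h>
#include <linux/mm.h>

/* Revoke CPU mappings for [offset, offset + size) of this file only. */
static void revoke_file_range(struct file *filp, loff_t offset, loff_t size)
{
    unmap_mapping_range(filp->f_mapping, offset, size, 1);
}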
static int uvm_open_entry(struct inode *inode, struct file *filp)
@@ -78,11 +253,44 @@ static void uvm_release_deferred(void *data)
    uvm_up_read(&g_uvm_global.pm.lock);
}

static void uvm_mm_release(struct file *filp, struct file *uvm_file)
{
    uvm_va_space_t *va_space = uvm_va_space_get(uvm_file);
    uvm_va_space_mm_t *va_space_mm = &va_space->va_space_mm;
    struct mm_struct *mm = va_space_mm->mm;

    if (uvm_va_space_mm_enabled(va_space)) {
        uvm_va_space_mm_unregister(va_space);

        if (uvm_va_space_mm_enabled(va_space))
            uvm_mmput(mm);

        va_space_mm->mm = NULL;
        fput(uvm_file);
    }
}

static int uvm_release(struct inode *inode, struct file *filp)
{
    uvm_va_space_t *va_space = uvm_va_space_get(filp);
    void *ptr;
    uvm_va_space_t *va_space;
    uvm_fd_type_t fd_type;
    int ret;

    fd_type = uvm_fd_type(filp, &ptr);
    UVM_ASSERT(fd_type != UVM_FD_INITIALIZING);
    if (fd_type == UVM_FD_UNINITIALIZED) {
        uvm_kvfree(filp->f_mapping);
        return 0;
    }
    else if (fd_type == UVM_FD_MM) {
        uvm_kvfree(filp->f_mapping);
        uvm_mm_release(filp, (struct file *)ptr);
        return 0;
    }

    UVM_ASSERT(fd_type == UVM_FD_VA_SPACE);
    va_space = (uvm_va_space_t *)ptr;
    filp->private_data = NULL;
    filp->f_mapping = NULL;

@@ -100,7 +308,7 @@ static int uvm_release(struct inode *inode, struct file *filp)
    // been destroyed, and va_space->mapping won't be used again. Still,
    // the va_space survives the inode if its destruction is deferred, in
    // which case the references are rendered stale.
    address_space_init_once(&va_space->mapping);
    address_space_init_once(va_space->mapping);

    nv_kthread_q_item_init(&va_space->deferred_release_q_item, uvm_release_deferred, va_space);
    ret = nv_kthread_q_schedule_q_item(&g_uvm_global.deferred_release_q, &va_space->deferred_release_q_item);
@@ -363,14 +571,11 @@ static void uvm_vm_open_managed_entry(struct vm_area_struct *vma)
static void uvm_vm_close_managed(struct vm_area_struct *vma)
{
    uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file);
    uvm_gpu_t *gpu;
    bool make_zombie = false;

    if (current->mm != NULL)
        uvm_record_lock_mmap_lock_write(current->mm);

    UVM_ASSERT(uvm_va_space_initialized(va_space) == NV_OK);

    // current->mm will be NULL on process teardown, in which case we have
    // special handling.
    if (current->mm == NULL) {
@@ -400,14 +605,6 @@ static void uvm_vm_close_managed(struct vm_area_struct *vma)

    uvm_destroy_vma_managed(vma, make_zombie);

    // Notify GPU address spaces that the fault buffer needs to be flushed to avoid finding stale entries
    // that can be attributed to new VA ranges reallocated at the same address
    for_each_va_space_gpu_in_mask(gpu, va_space, &va_space->registered_gpu_va_spaces) {
        uvm_gpu_va_space_t *gpu_va_space = uvm_gpu_va_space_get(va_space, gpu);
        UVM_ASSERT(gpu_va_space);

        gpu_va_space->needs_fault_buffer_flush = true;
    }
    uvm_va_space_up_write(va_space);

    if (current->mm != NULL)
@@ -485,6 +682,9 @@ static void uvm_vm_open_semaphore_pool(struct vm_area_struct *vma)
    // Semaphore pool vmas do not have vma wrappers, but some functions will
    // assume vm_private_data is a wrapper.
    vma->vm_private_data = NULL;
#if defined(VM_WIPEONFORK)
    nv_vm_flags_set(vma, VM_WIPEONFORK);
#endif

    if (is_fork) {
        // If we forked, leave the parent vma alone.
@@ -556,7 +756,7 @@

static int uvm_mmap(struct file *filp, struct vm_area_struct *vma)
{
    uvm_va_space_t *va_space = uvm_va_space_get(filp);
    uvm_va_space_t *va_space;
    uvm_va_range_t *va_range;
    NV_STATUS status = uvm_global_get_status();
    int ret = 0;
@@ -565,8 +765,8 @@ static int uvm_mmap(struct file *filp, struct vm_area_struct *vma)
    if (status != NV_OK)
        return -nv_status_to_errno(status);

    status = uvm_va_space_initialized(va_space);
    if (status != NV_OK)
    va_space = uvm_fd_va_space(filp);
    if (!va_space)
        return -EBADFD;

    // When the VA space is associated with an mm, all vmas under the VA space
@@ -618,7 +818,11 @@ static int uvm_mmap(struct file *filp, struct vm_area_struct *vma)
    // Using VM_DONTCOPY would be nice, but madvise(MADV_DOFORK) can reset that
    // so we have to handle vm_open on fork anyway. We could disable MADV_DOFORK
    // with VM_IO, but that causes other mapping issues.
    vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
    // Make the default behavior be VM_DONTCOPY to avoid the performance impact
    // of removing CPU mappings in the parent on fork()+exec(). Users can call
    // madvise(MADV_DOFORK) if the child process requires access to the
    // allocation.
    nv_vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTCOPY);

    vma->vm_ops = &uvm_vm_ops_managed;

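With VM_DONTCOPY as the new default, a parent that actually wants a child to inherit a UVM mapping must opt back in before forking. The userspace side, as a sketch:

#include <sys/mman.h>

/* Re-enable copy-on-fork for an allocation the child process must access. */
static int allow_child_access(void *addr, size_t size)
{
    return madvise(addr, size, MADV_DOFORK);
}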
@@ -678,6 +882,13 @@ out:
    return ret;
}

bool uvm_vma_is_managed(struct vm_area_struct *vma)
{
    return vma->vm_ops == &uvm_vm_ops_disabled ||
           vma->vm_ops == &uvm_vm_ops_managed ||
           vma->vm_ops == &uvm_vm_ops_semaphore_pool;
}

static int uvm_mmap_entry(struct file *filp, struct vm_area_struct *vma)
{
    UVM_ENTRY_RET(uvm_mmap(filp, vma));
@@ -685,7 +896,57 @@ static int uvm_mmap_entry(struct file *filp, struct vm_area_struct *vma)

static NV_STATUS uvm_api_initialize(UVM_INITIALIZE_PARAMS *params, struct file *filp)
{
    return uvm_va_space_initialize(uvm_va_space_get(filp), params->flags);
    uvm_va_space_t *va_space;
    NV_STATUS status;
    uvm_fd_type_t old_fd_type;

    // Normally we expect private_data == UVM_FD_UNINITIALIZED. However multiple
    // threads may call this ioctl concurrently so we have to be careful to
    // avoid initializing multiple va_spaces and/or leaking memory. To do this
    // we do an atomic compare and swap. Only one thread will observe
    // UVM_FD_UNINITIALIZED and that thread will allocate and setup the
    // va_space.
    //
    // Other threads will either see UVM_FD_INITIALIZING or UVM_FD_VA_SPACE. In
    // the case of UVM_FD_VA_SPACE we return success if and only if the
    // initialization flags match. If another thread is still initializing the
    // va_space we return NV_ERR_BUSY_RETRY.
    //
    // If va_space initialization fails we return the failure code and reset the
    // FD state back to UVM_FD_UNINITIALIZED to allow another initialization
    // attempt to be made. This is safe because other threads will have only had
    // a chance to observe UVM_FD_INITIALIZING and not UVM_FD_VA_SPACE in this
    // case.
    old_fd_type = atomic_long_cmpxchg((atomic_long_t *)&filp->private_data,
                                      UVM_FD_UNINITIALIZED,
                                      UVM_FD_INITIALIZING);
    old_fd_type &= UVM_FD_TYPE_MASK;
    if (old_fd_type == UVM_FD_UNINITIALIZED) {
        status = uvm_va_space_create(filp->f_mapping, &va_space, params->flags);
        if (status != NV_OK) {
            atomic_long_set_release((atomic_long_t *)&filp->private_data, UVM_FD_UNINITIALIZED);
            return status;
        }

        atomic_long_set_release((atomic_long_t *)&filp->private_data, (long)va_space | UVM_FD_VA_SPACE);
    }
    else if (old_fd_type == UVM_FD_VA_SPACE) {
        va_space = uvm_va_space_get(filp);

        if (params->flags != va_space->initialization_flags)
            status = NV_ERR_INVALID_ARGUMENT;
        else
            status = NV_OK;
    }
    else if (old_fd_type == UVM_FD_MM) {
        status = NV_ERR_INVALID_ARGUMENT;
    }
    else {
        UVM_ASSERT(old_fd_type == UVM_FD_INITIALIZING);
        status = NV_ERR_BUSY_RETRY;
    }

    return status;
}

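The initialization race handled above is the classic one-shot-init-with-cmpxchg pattern. Stripped of the UVM specifics and rendered in userspace C11 for clarity:

#include <errno.h>
#include <stdatomic.h>

enum { UNINIT, INITIALIZING, READY };
static _Atomic long g_state = UNINIT;

static int try_init_once(void)
{
    long expected = UNINIT;

    if (atomic_compare_exchange_strong(&g_state, &expected, INITIALIZING)) {
        /* Sole winner: perform the real setup here, then publish. */
        atomic_store_explicit(&g_state, READY, memory_order_release);
        return 0;
    }

    /* Losers: succeed if init already finished, else ask the caller to retry. */
    return (expected == READY) ? 0 : -EBUSY;
}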
static NV_STATUS uvm_api_pageable_mem_access(UVM_PAGEABLE_MEM_ACCESS_PARAMS *params, struct file *filp)
@@ -703,6 +964,7 @@ static long uvm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
            return 0;

        UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_INITIALIZE, uvm_api_initialize);
        UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_MM_INITIALIZE, uvm_api_mm_initialize);

        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_PAGEABLE_MEM_ACCESS, uvm_api_pageable_mem_access);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_PAGEABLE_MEM_ACCESS_ON_GPU, uvm_api_pageable_mem_access_on_gpu);
@@ -782,11 +1044,6 @@ static const struct file_operations uvm_fops =
    .owner = THIS_MODULE,
};

bool uvm_file_is_nvidia_uvm(struct file *filp)
{
    return (filp != NULL) && (filp->f_op == &uvm_fops);
}

NV_STATUS uvm_test_register_unload_state_buffer(UVM_TEST_REGISTER_UNLOAD_STATE_BUFFER_PARAMS *params, struct file *filp)
{
    long ret;
@@ -800,7 +1057,7 @@ NV_STATUS uvm_test_register_unload_state_buffer(UVM_TEST_REGISTER_UNLOAD_STATE_B
    // are not used because unload_state_buf may be a managed memory pointer and
    // therefore a locking assertion from the CPU fault handler could be fired.
    nv_mmap_read_lock(current->mm);
    ret = NV_PIN_USER_PAGES(params->unload_state_buf, 1, FOLL_WRITE, &page, NULL);
    ret = NV_PIN_USER_PAGES(params->unload_state_buf, 1, FOLL_WRITE, &page);
    nv_mmap_read_unlock(current->mm);

    if (ret < 0)

File diff suppressed because it is too large
@@ -1,5 +1,5 @@
/*******************************************************************************
    Copyright (c) 2021 NVIDIA Corporation
    Copyright (c) 2021-2023 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
@@ -49,11 +49,13 @@ void uvm_hal_ada_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
    // A single top level PDE on Ada covers 128 TB and that's the minimum size
    // that can be used.
    parent_gpu->rm_va_base = 0;
    parent_gpu->rm_va_size = 128ull * 1024 * 1024 * 1024 * 1024;
    parent_gpu->rm_va_size = 128 * UVM_SIZE_1TB;

    parent_gpu->uvm_mem_va_base = 384ull * 1024 * 1024 * 1024 * 1024;
    parent_gpu->uvm_mem_va_base = 384 * UVM_SIZE_1TB;
    parent_gpu->uvm_mem_va_size = UVM_MEM_VA_SIZE;

    parent_gpu->ce_phys_vidmem_write_supported = true;

    parent_gpu->peer_copy_mode = g_uvm_global.peer_copy_mode;

    // Not all units on Ada support 49-bit addressing, including those which
@@ -77,6 +79,8 @@ void uvm_hal_ada_arch_init_properties(uvm_parent_gpu_t *parent_gpu)

    parent_gpu->access_counters_supported = true;

    parent_gpu->access_counters_can_use_physical_addresses = false;

    parent_gpu->fault_cancel_va_supported = true;

    parent_gpu->scoped_atomics_supported = true;
@@ -92,4 +96,6 @@ void uvm_hal_ada_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
    parent_gpu->map_remap_larger_page_promotion = false;

    parent_gpu->plc_supported = true;

    parent_gpu->no_ats_range_required = false;
}

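The UVM_SIZE_1TB substitutions above are purely cosmetic, assuming the constant has the usual definition:

#define UVM_SIZE_1TB (1024ull * 1024 * 1024 * 1024)

_Static_assert(128 * UVM_SIZE_1TB == 128ull * 1024 * 1024 * 1024 * 1024,
               "128 TB reads the same either way");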
@@ -1,5 +1,5 @@
/*******************************************************************************
    Copyright (c) 2018-20221 NVIDIA Corporation
    Copyright (c) 2018-2023 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
@@ -38,23 +38,27 @@ void uvm_hal_ampere_arch_init_properties(uvm_parent_gpu_t *parent_gpu)

    parent_gpu->utlb_per_gpc_count = uvm_ampere_get_utlbs_per_gpc(parent_gpu);

    parent_gpu->fault_buffer_info.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount * parent_gpu->utlb_per_gpc_count;
    parent_gpu->fault_buffer_info.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount *
                                                          parent_gpu->utlb_per_gpc_count;
    {
        uvm_fault_buffer_entry_t *dummy;
        UVM_ASSERT(parent_gpu->fault_buffer_info.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) * 8)));
        UVM_ASSERT(parent_gpu->fault_buffer_info.replayable.utlb_count <= (1 <<
            (sizeof(dummy->fault_source.utlb_id) * 8)));
    }

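The reflowed assertion just bounds the uTLB count by the width of the fault entry's utlb_id field. Reduced to its arithmetic, with an 8-bit field assumed for illustration:

#include <stdint.h>

struct demo_fault_source { uint8_t utlb_id; };

/* An N-byte id field can name at most 1 << (N * 8) distinct uTLBs. */
_Static_assert((1 << (sizeof(((struct demo_fault_source *)0)->utlb_id) * 8)) == 256,
               "an 8-bit utlb_id covers at most 256 uTLBs");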
    // A single top level PDE on Ampere covers 128 TB and that's the minimum
    // size that can be used.
    parent_gpu->rm_va_base = 0;
    parent_gpu->rm_va_size = 128ull * 1024 * 1024 * 1024 * 1024;
    parent_gpu->rm_va_size = 128 * UVM_SIZE_1TB;

    parent_gpu->uvm_mem_va_base = 384ull * 1024 * 1024 * 1024 * 1024;
    parent_gpu->uvm_mem_va_base = 384 * UVM_SIZE_1TB;
    parent_gpu->uvm_mem_va_size = UVM_MEM_VA_SIZE;

    // See uvm_mmu.h for mapping placement
    parent_gpu->flat_vidmem_va_base = 136ull * 1024 * 1024 * 1024 * 1024;
    parent_gpu->flat_sysmem_va_base = 256ull * 1024 * 1024 * 1024 * 1024;
    parent_gpu->flat_vidmem_va_base = 160 * UVM_SIZE_1TB;
    parent_gpu->flat_sysmem_va_base = 256 * UVM_SIZE_1TB;

    parent_gpu->ce_phys_vidmem_write_supported = true;

    parent_gpu->peer_copy_mode = g_uvm_global.peer_copy_mode;

@@ -79,6 +83,8 @@ void uvm_hal_ampere_arch_init_properties(uvm_parent_gpu_t *parent_gpu)

    parent_gpu->access_counters_supported = true;

    parent_gpu->access_counters_can_use_physical_addresses = false;

    parent_gpu->fault_cancel_va_supported = true;

    parent_gpu->scoped_atomics_supported = true;
@@ -99,4 +105,6 @@ void uvm_hal_ampere_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
    parent_gpu->map_remap_larger_page_promotion = false;

    parent_gpu->plc_supported = true;

    parent_gpu->no_ats_range_required = false;
}

Some files were not shown because too many files have changed in this diff.